]> git.proxmox.com Git - ovs.git/blame - ofproto/ofproto-dpif.c
ofp-msgs: ensure that l2 is set in ofpmp_reserve()
[ovs.git] / ofproto / ofproto-dpif.c
CommitLineData
abe529af 1/*
e09ee259 2 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
abe529af
BP
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <config.h>
18
5bee6e26 19#include "ofproto/ofproto-provider.h"
abe529af
BP
20
21#include <errno.h>
22
abe529af 23#include "bond.h"
daff3353 24#include "bundle.h"
abe529af
BP
25#include "byte-order.h"
26#include "connmgr.h"
27#include "coverage.h"
28#include "cfm.h"
29#include "dpif.h"
30#include "dynamic-string.h"
31#include "fail-open.h"
32#include "hmapx.h"
33#include "lacp.h"
75a75043 34#include "learn.h"
abe529af 35#include "mac-learning.h"
816fd533 36#include "meta-flow.h"
abe529af 37#include "multipath.h"
0a740f48 38#include "netdev-vport.h"
abe529af
BP
39#include "netdev.h"
40#include "netlink.h"
41#include "nx-match.h"
42#include "odp-util.h"
43#include "ofp-util.h"
44#include "ofpbuf.h"
f25d0cf3 45#include "ofp-actions.h"
31a19d69 46#include "ofp-parse.h"
abe529af 47#include "ofp-print.h"
9d6ac44e 48#include "ofproto-dpif-governor.h"
bae473fe 49#include "ofproto-dpif-sflow.h"
abe529af 50#include "poll-loop.h"
0d085684 51#include "simap.h"
27022416 52#include "smap.h"
abe529af 53#include "timer.h"
b9ad7294 54#include "tunnel.h"
6c1491fb 55#include "unaligned.h"
abe529af
BP
56#include "unixctl.h"
57#include "vlan-bitmap.h"
58#include "vlog.h"
59
60VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
61
abe529af 62COVERAGE_DEFINE(ofproto_dpif_expired);
abe529af
BP
63COVERAGE_DEFINE(ofproto_dpif_xlate);
64COVERAGE_DEFINE(facet_changed_rule);
abe529af
BP
65COVERAGE_DEFINE(facet_revalidate);
66COVERAGE_DEFINE(facet_unexpected);
9d6ac44e 67COVERAGE_DEFINE(facet_suppress);
abe529af 68
29901626 69/* Maximum depth of flow table recursion (due to resubmit actions) in a
abe529af 70 * flow translation. */
1642690c 71#define MAX_RESUBMIT_RECURSION 64
abe529af 72
9cdaaebe
BP
73/* Number of implemented OpenFlow tables. */
74enum { N_TABLES = 255 };
c57b2226
BP
75enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
76BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
9cdaaebe 77
abe529af
BP
78struct ofport_dpif;
79struct ofproto_dpif;
a088a1ff 80struct flow_miss;
abe529af
BP
81
82struct rule_dpif {
83 struct rule up;
84
abe529af
BP
85 /* These statistics:
86 *
87 * - Do include packets and bytes from facets that have been deleted or
88 * whose own statistics have been folded into the rule.
89 *
90 * - Do include packets and bytes sent "by hand" that were accounted to
91 * the rule without any facet being involved (this is a rare corner
92 * case in rule_execute()).
93 *
94 * - Do not include packet or bytes that can be obtained from any facet's
95 * packet_count or byte_count member or that can be obtained from the
b0f7b9b5 96 * datapath by, e.g., dpif_flow_get() for any subfacet.
abe529af
BP
97 */
98 uint64_t packet_count; /* Number of packets received. */
99 uint64_t byte_count; /* Number of bytes received. */
100
54a9cbc9
BP
101 tag_type tag; /* Caches rule_calculate_tag() result. */
102
abe529af
BP
103 struct list facets; /* List of "struct facet"s. */
104};
105
106static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
107{
108 return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
109}
110
29901626 111static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
c57b2226
BP
112 const struct flow *);
113static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
114 const struct flow *,
115 uint8_t table);
c376f9a3
IY
116static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
117 const struct flow *flow);
abe529af 118
112bc5f4
BP
119static void rule_credit_stats(struct rule_dpif *,
120 const struct dpif_flow_stats *);
18b2a258 121static void flow_push_stats(struct rule_dpif *, const struct flow *,
112bc5f4 122 const struct dpif_flow_stats *);
822d9414 123static tag_type rule_calculate_tag(const struct flow *,
5cb7a798 124 const struct minimask *, uint32_t basis);
b0f7b9b5
BP
125static void rule_invalidate(const struct rule_dpif *);
126
abe529af
BP
127#define MAX_MIRRORS 32
128typedef uint32_t mirror_mask_t;
129#define MIRROR_MASK_C(X) UINT32_C(X)
130BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
131struct ofmirror {
132 struct ofproto_dpif *ofproto; /* Owning ofproto. */
133 size_t idx; /* In ofproto's "mirrors" array. */
134 void *aux; /* Key supplied by ofproto's client. */
135 char *name; /* Identifier for log messages. */
136
137 /* Selection criteria. */
138 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
139 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
140 unsigned long *vlans; /* Bitmap of chosen VLANs, NULL selects all. */
141
9ba15e2a 142 /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
abe529af
BP
143 struct ofbundle *out; /* Output port or NULL. */
144 int out_vlan; /* Output VLAN or -1. */
9ba15e2a 145 mirror_mask_t dup_mirrors; /* Bitmap of mirrors with the same output. */
9d24de3b
JP
146
147 /* Counters. */
148 int64_t packet_count; /* Number of packets sent. */
149 int64_t byte_count; /* Number of bytes sent. */
abe529af
BP
150};
151
152static void mirror_destroy(struct ofmirror *);
9d24de3b
JP
153static void update_mirror_stats(struct ofproto_dpif *ofproto,
154 mirror_mask_t mirrors,
155 uint64_t packets, uint64_t bytes);
abe529af 156
abe529af 157struct ofbundle {
abe529af 158 struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
6e492d81 159 struct ofproto_dpif *ofproto; /* Owning ofproto. */
abe529af
BP
160 void *aux; /* Key supplied by ofproto's client. */
161 char *name; /* Identifier for log messages. */
162
163 /* Configuration. */
164 struct list ports; /* Contains "struct ofport"s. */
ecac4ebf 165 enum port_vlan_mode vlan_mode; /* VLAN mode */
abe529af
BP
166 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
167 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
168 * NULL if all VLANs are trunked. */
169 struct lacp *lacp; /* LACP if LACP is enabled, otherwise NULL. */
170 struct bond *bond; /* Nonnull iff more than one port. */
5e9ceccd 171 bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
abe529af
BP
172
173 /* Status. */
9e1fd49b 174 bool floodable; /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
abe529af
BP
175
176 /* Port mirroring info. */
177 mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
178 mirror_mask_t dst_mirrors; /* Mirrors triggered when packet sent. */
179 mirror_mask_t mirror_out; /* Mirrors that output to this bundle. */
180};
181
182static void bundle_remove(struct ofport *);
7bde8dd8 183static void bundle_update(struct ofbundle *);
abe529af
BP
184static void bundle_destroy(struct ofbundle *);
185static void bundle_del_port(struct ofport_dpif *);
186static void bundle_run(struct ofbundle *);
187static void bundle_wait(struct ofbundle *);
4acbc98d 188static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
70c2fd56
BP
189 uint16_t in_port, bool warn,
190 struct ofport_dpif **in_ofportp);
abe529af 191
33158a18
JP
192/* A controller may use OFPP_NONE as the ingress port to indicate that
193 * it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
194 * when an input bundle is needed for validation (e.g., mirroring or
195 * OFPP_NORMAL processing). It is not connected to an 'ofproto' or have
196 * any 'port' structs, so care must be taken when dealing with it. */
197static struct ofbundle ofpp_none_bundle = {
198 .name = "OFPP_NONE",
199 .vlan_mode = PORT_VLAN_TRUNK
200};
201
21f7563c
JP
202static void stp_run(struct ofproto_dpif *ofproto);
203static void stp_wait(struct ofproto_dpif *ofproto);
851bf71d
EJ
204static int set_stp_port(struct ofport *,
205 const struct ofproto_port_stp_settings *);
21f7563c 206
5da5ec37
BP
207static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
208
abe529af
BP
209struct action_xlate_ctx {
210/* action_xlate_ctx_init() initializes these members. */
211
212 /* The ofproto. */
213 struct ofproto_dpif *ofproto;
214
215 /* Flow to which the OpenFlow actions apply. xlate_actions() will modify
216 * this flow when actions change header fields. */
217 struct flow flow;
218
219 /* The packet corresponding to 'flow', or a null pointer if we are
220 * revalidating without a packet to refer to. */
221 const struct ofpbuf *packet;
222
3de9590b
BP
223 /* Should OFPP_NORMAL update the MAC learning table? Should "learn"
224 * actions update the flow table?
225 *
226 * We want to update these tables if we are actually processing a packet,
227 * or if we are accounting for packets that the datapath has processed, but
228 * not if we are just revalidating. */
229 bool may_learn;
75a75043 230
18b2a258
BP
231 /* The rule that we are currently translating, or NULL. */
232 struct rule_dpif *rule;
54834960 233
0e553d9c
BP
234 /* Union of the set of TCP flags seen so far in this flow. (Used only by
235 * NXAST_FIN_TIMEOUT. Set to zero to avoid updating updating rules'
236 * timeouts.) */
237 uint8_t tcp_flags;
238
112bc5f4
BP
239 /* If nonnull, flow translation calls this function just before executing a
240 * resubmit or OFPP_TABLE action. In addition, disables logging of traces
241 * when the recursion depth is exceeded.
242 *
243 * 'rule' is the rule being submitted into. It will be null if the
244 * resubmit or OFPP_TABLE action didn't find a matching rule.
245 *
246 * This is normally null so the client has to set it manually after
247 * calling action_xlate_ctx_init(). */
248 void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);
249
479df176
BP
250 /* If nonnull, flow translation calls this function to report some
251 * significant decision, e.g. to explain why OFPP_NORMAL translation
252 * dropped a packet. */
253 void (*report_hook)(struct action_xlate_ctx *, const char *s);
254
112bc5f4
BP
255 /* If nonnull, flow translation credits the specified statistics to each
256 * rule reached through a resubmit or OFPP_TABLE action.
abe529af
BP
257 *
258 * This is normally null so the client has to set it manually after
259 * calling action_xlate_ctx_init(). */
112bc5f4 260 const struct dpif_flow_stats *resubmit_stats;
abe529af 261
abe529af
BP
262/* xlate_actions() initializes and uses these members. The client might want
263 * to look at them after it returns. */
264
265 struct ofpbuf *odp_actions; /* Datapath actions. */
75a75043 266 tag_type tags; /* Tags associated with actions. */
6a7e895f 267 enum slow_path_reason slow; /* 0 if fast path may be used. */
75a75043
BP
268 bool has_learn; /* Actions include NXAST_LEARN? */
269 bool has_normal; /* Actions output to OFPP_NORMAL? */
0e553d9c 270 bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
abe529af 271 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
9d24de3b 272 mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
abe529af
BP
273
274/* xlate_actions() initializes and uses these members, but the client has no
275 * reason to look at them. */
276
277 int recurse; /* Recursion level, via xlate_table_action. */
6a6455e5 278 bool max_resubmit_trigger; /* Recursed too deeply during translation. */
b3e9b2ed 279 struct flow base_flow; /* Flow at the last commit. */
deedf7e7 280 uint32_t orig_skb_priority; /* Priority when packet arrived. */
29901626 281 uint8_t table_id; /* OpenFlow table ID where flow was found. */
6ff686f2 282 uint32_t sflow_n_outputs; /* Number of output ports. */
9b56fe13 283 uint32_t sflow_odp_port; /* Output port for composing sFlow action. */
6ff686f2 284 uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
848e8809 285 bool exit; /* No further actions should be processed. */
ccb7c863 286 struct flow orig_flow; /* Copy of original flow. */
abe529af
BP
287};
288
289static void action_xlate_ctx_init(struct action_xlate_ctx *,
290 struct ofproto_dpif *, const struct flow *,
18b2a258 291 ovs_be16 initial_tci, struct rule_dpif *,
0e553d9c 292 uint8_t tcp_flags, const struct ofpbuf *);
050ac423 293static void xlate_actions(struct action_xlate_ctx *,
f25d0cf3 294 const struct ofpact *ofpacts, size_t ofpacts_len,
050ac423
BP
295 struct ofpbuf *odp_actions);
296static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
f25d0cf3
BP
297 const struct ofpact *ofpacts,
298 size_t ofpacts_len);
0a740f48
EJ
299static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
300 uint8_t table_id, bool may_packet_in);
abe529af 301
6a7e895f
BP
302static size_t put_userspace_action(const struct ofproto_dpif *,
303 struct ofpbuf *odp_actions,
304 const struct flow *,
305 const union user_action_cookie *);
306
307static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
308 enum slow_path_reason,
309 uint64_t *stub, size_t stub_size,
310 const struct nlattr **actionsp,
311 size_t *actions_lenp);
312
479df176
BP
313static void xlate_report(struct action_xlate_ctx *ctx, const char *s);
314
6a7e895f
BP
315/* A subfacet (see "struct subfacet" below) has three possible installation
316 * states:
317 *
318 * - SF_NOT_INSTALLED: Not installed in the datapath. This will only be the
319 * case just after the subfacet is created, just before the subfacet is
320 * destroyed, or if the datapath returns an error when we try to install a
321 * subfacet.
322 *
323 * - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
324 *
325 * - SF_SLOW_PATH: An action that sends every packet for the subfacet through
326 * ofproto_dpif is installed in the datapath.
327 */
328enum subfacet_path {
329 SF_NOT_INSTALLED, /* No datapath flow for this subfacet. */
330 SF_FAST_PATH, /* Full actions are installed. */
331 SF_SLOW_PATH, /* Send-to-userspace action is installed. */
332};
333
334static const char *subfacet_path_to_string(enum subfacet_path);
335
5f5fbd17
BP
336/* A dpif flow and actions associated with a facet.
337 *
338 * See also the large comment on struct facet. */
339struct subfacet {
340 /* Owners. */
341 struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
342 struct list list_node; /* In struct facet's 'facets' list. */
343 struct facet *facet; /* Owning facet. */
344
345 /* Key.
346 *
347 * To save memory in the common case, 'key' is NULL if 'key_fitness' is
348 * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
349 * regenerate the ODP flow key from ->facet->flow. */
350 enum odp_key_fitness key_fitness;
351 struct nlattr *key;
352 int key_len;
353
354 long long int used; /* Time last used; time created if not used. */
355
356 uint64_t dp_packet_count; /* Last known packet count in the datapath. */
357 uint64_t dp_byte_count; /* Last known byte count in the datapath. */
358
359 /* Datapath actions.
360 *
361 * These should be essentially identical for every subfacet in a facet, but
362 * may differ in trivial ways due to VLAN splinters. */
363 size_t actions_len; /* Number of bytes in actions[]. */
364 struct nlattr *actions; /* Datapath actions. */
365
6a7e895f
BP
366 enum slow_path_reason slow; /* 0 if fast path may be used. */
367 enum subfacet_path path; /* Installed in datapath? */
5f5fbd17
BP
368
369 /* This value is normally the same as ->facet->flow.vlan_tci. Only VLAN
370 * splinters can cause it to differ. This value should be removed when
371 * the VLAN splinters feature is no longer needed. */
372 ovs_be16 initial_tci; /* Initial VLAN TCI value. */
a088a1ff
JP
373
374 /* Datapath port the packet arrived on. This is needed to remove
375 * flows for ports that are no longer part of the bridge. Since the
376 * flow definition only has the OpenFlow port number and the port is
377 * no longer part of the bridge, we can't determine the datapath port
378 * number needed to delete the flow from the datapath. */
379 uint32_t odp_in_port;
5f5fbd17
BP
380};
381
1d85f9e5
JP
382#define SUBFACET_DESTROY_MAX_BATCH 50
383
a088a1ff 384static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
459b16a1 385 long long int now);
5f5fbd17 386static struct subfacet *subfacet_find(struct ofproto_dpif *,
acf60855
JP
387 const struct nlattr *key, size_t key_len,
388 uint32_t key_hash,
389 const struct flow *flow);
5f5fbd17
BP
390static void subfacet_destroy(struct subfacet *);
391static void subfacet_destroy__(struct subfacet *);
1d85f9e5
JP
392static void subfacet_destroy_batch(struct ofproto_dpif *,
393 struct subfacet **, int n);
5f5fbd17
BP
394static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
395 struct ofpbuf *key);
396static void subfacet_reset_dp_stats(struct subfacet *,
397 struct dpif_flow_stats *);
398static void subfacet_update_time(struct subfacet *, long long int used);
399static void subfacet_update_stats(struct subfacet *,
400 const struct dpif_flow_stats *);
401static void subfacet_make_actions(struct subfacet *,
5fe20d5d
BP
402 const struct ofpbuf *packet,
403 struct ofpbuf *odp_actions);
5f5fbd17
BP
404static int subfacet_install(struct subfacet *,
405 const struct nlattr *actions, size_t actions_len,
6a7e895f 406 struct dpif_flow_stats *, enum slow_path_reason);
5f5fbd17
BP
407static void subfacet_uninstall(struct subfacet *);
408
6a7e895f
BP
409static enum subfacet_path subfacet_want_path(enum slow_path_reason);
410
b0f7b9b5
BP
411/* An exact-match instantiation of an OpenFlow flow.
412 *
413 * A facet associates a "struct flow", which represents the Open vSwitch
b95fc6ba
BP
414 * userspace idea of an exact-match flow, with one or more subfacets. Each
415 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
416 * the facet. When the kernel module (or other dpif implementation) and Open
417 * vSwitch userspace agree on the definition of a flow key, there is exactly
418 * one subfacet per facet. If the dpif implementation supports more-specific
419 * flow matching than userspace, however, a facet can have more than one
420 * subfacet, each of which corresponds to some distinction in flow that
421 * userspace simply doesn't understand.
b0f7b9b5
BP
422 *
423 * Flow expiration works in terms of subfacets, so a facet must have at least
424 * one subfacet or it will never expire, leaking memory. */
abe529af 425struct facet {
b0f7b9b5
BP
426 /* Owners. */
427 struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
428 struct list list_node; /* In owning rule's 'facets' list. */
429 struct rule_dpif *rule; /* Owning rule. */
430
431 /* Owned data. */
432 struct list subfacets;
abe529af
BP
433 long long int used; /* Time last used; time created if not used. */
434
b0f7b9b5
BP
435 /* Key. */
436 struct flow flow;
437
abe529af
BP
438 /* These statistics:
439 *
440 * - Do include packets and bytes sent "by hand", e.g. with
441 * dpif_execute().
442 *
443 * - Do include packets and bytes that were obtained from the datapath
b0f7b9b5 444 * when a subfacet's statistics were reset (e.g. dpif_flow_put() with
abe529af 445 * DPIF_FP_ZERO_STATS).
b0f7b9b5
BP
446 *
447 * - Do not include packets or bytes that can be obtained from the
448 * datapath for any existing subfacet.
abe529af
BP
449 */
450 uint64_t packet_count; /* Number of packets received. */
451 uint64_t byte_count; /* Number of bytes received. */
452
b0f7b9b5 453 /* Resubmit statistics. */
9d24de3b
JP
454 uint64_t prev_packet_count; /* Number of packets from last stats push. */
455 uint64_t prev_byte_count; /* Number of bytes from last stats push. */
456 long long int prev_used; /* Used time from last stats push. */
abe529af 457
b0f7b9b5 458 /* Accounting. */
907a4c5e 459 uint64_t accounted_bytes; /* Bytes processed by facet_account(). */
b0f7b9b5 460 struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
0e553d9c 461 uint8_t tcp_flags; /* TCP flags seen for this 'rule'. */
abe529af 462
b95fc6ba
BP
463 /* Properties of datapath actions.
464 *
465 * Every subfacet has its own actions because actions can differ slightly
466 * between splintered and non-splintered subfacets due to the VLAN tag
467 * being initially different (present vs. absent). All of them have these
468 * properties in common so we just store one copy of them here. */
75a75043
BP
469 bool has_learn; /* Actions include NXAST_LEARN? */
470 bool has_normal; /* Actions output to OFPP_NORMAL? */
0e553d9c 471 bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
b0f7b9b5 472 tag_type tags; /* Tags that would require revalidation. */
9d24de3b 473 mirror_mask_t mirrors; /* Bitmap of dependent mirrors. */
26cd7e34
BP
474
475 /* Storage for a single subfacet, to reduce malloc() time and space
476 * overhead. (A facet always has at least one subfacet and in the common
477 * case has exactly one subfacet.) */
478 struct subfacet one_subfacet;
abe529af
BP
479};
480
2b459b83
BP
481static struct facet *facet_create(struct rule_dpif *,
482 const struct flow *, uint32_t hash);
15baa734 483static void facet_remove(struct facet *);
abe529af
BP
484static void facet_free(struct facet *);
485
2b459b83
BP
486static struct facet *facet_find(struct ofproto_dpif *,
487 const struct flow *, uint32_t hash);
abe529af 488static struct facet *facet_lookup_valid(struct ofproto_dpif *,
2b459b83 489 const struct flow *, uint32_t hash);
c57b2226 490static void facet_revalidate(struct facet *);
6814e51f 491static bool facet_check_consistency(struct facet *);
abe529af 492
15baa734 493static void facet_flush_stats(struct facet *);
abe529af 494
15baa734 495static void facet_update_time(struct facet *, long long int used);
bbb5d219 496static void facet_reset_counters(struct facet *);
abe529af 497static void facet_push_stats(struct facet *);
3de9590b
BP
498static void facet_learn(struct facet *);
499static void facet_account(struct facet *);
abe529af
BP
500
501static bool facet_is_controller_flow(struct facet *);
502
abe529af 503struct ofport_dpif {
acf60855 504 struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
abe529af
BP
505 struct ofport up;
506
507 uint32_t odp_port;
508 struct ofbundle *bundle; /* Bundle that contains this port, if any. */
509 struct list bundle_node; /* In struct ofbundle's "ports" list. */
510 struct cfm *cfm; /* Connectivity Fault Management, if any. */
511 tag_type tag; /* Tag associated with this port. */
015e08bc 512 bool may_enable; /* May be enabled in bonds. */
3e5b3fdb 513 long long int carrier_seq; /* Carrier status changes. */
b9ad7294 514 struct tnl_port *tnl_port; /* Tunnel handle, or null. */
21f7563c 515
52a90c29 516 /* Spanning tree. */
21f7563c
JP
517 struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
518 enum stp_state stp_state; /* Always STP_DISABLED if STP not in use. */
519 long long int stp_state_entered;
8b36f51e
EJ
520
521 struct hmap priorities; /* Map of attached 'priority_to_dscp's. */
52a90c29
BP
522
523 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
524 *
525 * This is deprecated. It is only for compatibility with broken device
526 * drivers in old versions of Linux that do not properly support VLANs when
527 * VLAN devices are not used. When broken device drivers are no longer in
528 * widespread use, we will delete these interfaces. */
529 uint16_t realdev_ofp_port;
530 int vlandev_vid;
8b36f51e
EJ
531};
532
533/* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from
534 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
535 * traffic egressing the 'ofport' with that priority should be marked with. */
536struct priority_to_dscp {
537 struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
538 uint32_t priority; /* Priority of this queue (see struct flow). */
539
540 uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
abe529af
BP
541};
542
52a90c29
BP
543/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
544 *
545 * This is deprecated. It is only for compatibility with broken device drivers
546 * in old versions of Linux that do not properly support VLANs when VLAN
547 * devices are not used. When broken device drivers are no longer in
548 * widespread use, we will delete these interfaces. */
549struct vlan_splinter {
550 struct hmap_node realdev_vid_node;
551 struct hmap_node vlandev_node;
552 uint16_t realdev_ofp_port;
553 uint16_t vlandev_ofp_port;
554 int vid;
555};
556
557static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
558 uint32_t realdev, ovs_be16 vlan_tci);
b98d8985 559static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
52a90c29
BP
560static void vsp_remove(struct ofport_dpif *);
561static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
562
e1b1d06a
JP
563static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
564 uint16_t ofp_port);
565static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
566 uint32_t odp_port);
567
abe529af
BP
568static struct ofport_dpif *
569ofport_dpif_cast(const struct ofport *ofport)
570{
cb22974d 571 ovs_assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
abe529af
BP
572 return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
573}
574
575static void port_run(struct ofport_dpif *);
0aa66d6e 576static void port_run_fast(struct ofport_dpif *);
abe529af 577static void port_wait(struct ofport_dpif *);
a5610457 578static int set_cfm(struct ofport *, const struct cfm_settings *);
8b36f51e 579static void ofport_clear_priorities(struct ofport_dpif *);
abe529af 580
7ee20df1
BP
581struct dpif_completion {
582 struct list list_node;
583 struct ofoperation *op;
584};
585
54a9cbc9
BP
586/* Extra information about a classifier table.
587 * Currently used just for optimized flow revalidation. */
588struct table_dpif {
589 /* If either of these is nonnull, then this table has a form that allows
590 * flows to be tagged to avoid revalidating most flows for the most common
591 * kinds of flow table changes. */
592 struct cls_table *catchall_table; /* Table that wildcards all fields. */
593 struct cls_table *other_table; /* Table with any other wildcard set. */
594 uint32_t basis; /* Keeps each table's tags separate. */
595};
596
3c4a309c
BP
597/* Reasons that we might need to revalidate every facet, and corresponding
598 * coverage counters.
599 *
600 * A value of 0 means that there is no need to revalidate.
601 *
602 * It would be nice to have some cleaner way to integrate with coverage
603 * counters, but with only a few reasons I guess this is good enough for
604 * now. */
605enum revalidate_reason {
606 REV_RECONFIGURE = 1, /* Switch configuration changed. */
607 REV_STP, /* Spanning tree protocol port status change. */
608 REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
609 REV_FLOW_TABLE, /* Flow table changed. */
610 REV_INCONSISTENCY /* Facet self-check failed. */
611};
612COVERAGE_DEFINE(rev_reconfigure);
613COVERAGE_DEFINE(rev_stp);
614COVERAGE_DEFINE(rev_port_toggled);
615COVERAGE_DEFINE(rev_flow_table);
616COVERAGE_DEFINE(rev_inconsistency);
617
8f73d537
EJ
618/* Drop keys are odp flow keys which have drop flows installed in the kernel.
619 * These are datapath flows which have no associated ofproto, if they did we
620 * would use facets. */
621struct drop_key {
622 struct hmap_node hmap_node;
623 struct nlattr *key;
624 size_t key_len;
625};
626
acf60855
JP
627/* All datapaths of a given type share a single dpif backer instance. */
628struct dpif_backer {
629 char *type;
630 int refcount;
631 struct dpif *dpif;
632 struct timer next_expiration;
633 struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
2cc3c58e 634
b9ad7294
EJ
635 struct sset tnl_backers; /* Set of dpif ports backing tunnels. */
636
2cc3c58e
EJ
637 /* Facet revalidation flags applying to facets which use this backer. */
638 enum revalidate_reason need_revalidate; /* Revalidate every facet. */
639 struct tag_set revalidate_set; /* Revalidate only matching facets. */
8f73d537
EJ
640
641 struct hmap drop_keys; /* Set of dropped odp keys. */
acf60855
JP
642};
643
644/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
645static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
646
8f73d537 647static void drop_key_clear(struct dpif_backer *);
acf60855
JP
648static struct ofport_dpif *
649odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
650
abe529af 651struct ofproto_dpif {
b44a10b7 652 struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
abe529af 653 struct ofproto up;
acf60855 654 struct dpif_backer *backer;
abe529af 655
c57b2226
BP
656 /* Special OpenFlow rules. */
657 struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
658 struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
659
6c1491fb
BP
660 /* Statistics. */
661 uint64_t n_matches;
662
abe529af
BP
663 /* Bridging. */
664 struct netflow *netflow;
bae473fe 665 struct dpif_sflow *sflow;
abe529af
BP
666 struct hmap bundles; /* Contains "struct ofbundle"s. */
667 struct mac_learning *ml;
668 struct ofmirror *mirrors[MAX_MIRRORS];
ccb7c863 669 bool has_mirrors;
abe529af
BP
670 bool has_bonded_bundles;
671
abe529af
BP
672 /* Facets. */
673 struct hmap facets;
b0f7b9b5 674 struct hmap subfacets;
9d6ac44e 675 struct governor *governor;
54a9cbc9
BP
676
677 /* Revalidation. */
678 struct table_dpif tables[N_TABLES];
7ee20df1
BP
679
680 /* Support for debugging async flow mods. */
681 struct list completions;
daff3353
EJ
682
683 bool has_bundle_action; /* True when the first bundle action appears. */
6527c598
PS
684 struct netdev_stats stats; /* To account packets generated and consumed in
685 * userspace. */
21f7563c
JP
686
687 /* Spanning tree. */
688 struct stp *stp;
689 long long int stp_last_tick;
52a90c29
BP
690
691 /* VLAN splinters. */
692 struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
693 struct hmap vlandev_map; /* vlandev -> (realdev,vid). */
e1b1d06a 694
acf60855 695 /* Ports. */
0a740f48
EJ
696 struct sset ports; /* Set of standard port names. */
697 struct sset ghost_ports; /* Ports with no datapath port. */
acf60855
JP
698 struct sset port_poll_set; /* Queued names for port_poll() reply. */
699 int port_poll_errno; /* Last errno for port_poll() reply. */
abe529af
BP
700};
701
7ee20df1
BP
702/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
703 * for debugging the asynchronous flow_mod implementation.) */
704static bool clogged;
705
b44a10b7
BP
706/* All existing ofproto_dpif instances, indexed by ->up.name. */
707static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
708
abe529af
BP
709static void ofproto_dpif_unixctl_init(void);
710
711static struct ofproto_dpif *
712ofproto_dpif_cast(const struct ofproto *ofproto)
713{
cb22974d 714 ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
abe529af
BP
715 return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
716}
717
4acbc98d 718static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
abe529af 719 uint16_t ofp_port);
4acbc98d 720static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
abe529af 721 uint32_t odp_port);
6a6455e5
EJ
722static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
723 const struct ofpbuf *, ovs_be16 initial_tci,
724 struct ds *);
b9ad7294 725static bool may_dpif_port_del(struct ofport_dpif *);
abe529af
BP
726
727/* Packet processing. */
728static void update_learning_table(struct ofproto_dpif *,
729 const struct flow *, int vlan,
730 struct ofbundle *);
501f8d1f
BP
731/* Upcalls. */
732#define FLOW_MISS_MAX_BATCH 50
acf60855 733static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);
abe529af
BP
734
735/* Flow expiration. */
acf60855 736static int expire(struct dpif_backer *);
abe529af 737
6fca1ffb
BP
738/* NetFlow. */
739static void send_netflow_active_timeouts(struct ofproto_dpif *);
740
abe529af 741/* Utilities. */
52a90c29 742static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
6a7d1a39
BP
743static size_t compose_sflow_action(const struct ofproto_dpif *,
744 struct ofpbuf *odp_actions,
745 const struct flow *, uint32_t odp_port);
c06bba01
JP
746static void add_mirror_actions(struct action_xlate_ctx *ctx,
747 const struct flow *flow);
abe529af
BP
748/* Global variables. */
749static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
acf60855
JP
750
751/* Initial mappings of port to bridge mappings. */
752static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
abe529af
BP
753\f
754/* Factory functions. */
755
b0408fca 756static void
acf60855 757init(const struct shash *iface_hints)
b0408fca 758{
acf60855
JP
759 struct shash_node *node;
760
761 /* Make a local copy, since we don't own 'iface_hints' elements. */
762 SHASH_FOR_EACH(node, iface_hints) {
763 const struct iface_hint *orig_hint = node->data;
764 struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
765
766 new_hint->br_name = xstrdup(orig_hint->br_name);
767 new_hint->br_type = xstrdup(orig_hint->br_type);
768 new_hint->ofp_port = orig_hint->ofp_port;
769
770 shash_add(&init_ofp_ports, node->name, new_hint);
771 }
b0408fca
JP
772}
773
abe529af
BP
774static void
775enumerate_types(struct sset *types)
776{
777 dp_enumerate_types(types);
778}
779
780static int
781enumerate_names(const char *type, struct sset *names)
782{
acf60855
JP
783 struct ofproto_dpif *ofproto;
784
785 sset_clear(names);
786 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
787 if (strcmp(type, ofproto->up.type)) {
788 continue;
789 }
790 sset_add(names, ofproto->up.name);
791 }
792
793 return 0;
abe529af
BP
794}
795
796static int
797del(const char *type, const char *name)
798{
799 struct dpif *dpif;
800 int error;
801
802 error = dpif_open(name, type, &dpif);
803 if (!error) {
804 error = dpif_delete(dpif);
805 dpif_close(dpif);
806 }
807 return error;
808}
809\f
0aeaabc8
JP
810static const char *
811port_open_type(const char *datapath_type, const char *port_type)
812{
813 return dpif_port_open_type(datapath_type, port_type);
814}
815
acf60855
JP
816/* Type functions. */
817
476cb42a
BP
818static struct ofproto_dpif *
819lookup_ofproto_dpif_by_port_name(const char *name)
820{
821 struct ofproto_dpif *ofproto;
822
823 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
824 if (sset_contains(&ofproto->ports, name)) {
825 return ofproto;
826 }
827 }
828
829 return NULL;
830}
831
acf60855
JP
832static int
833type_run(const char *type)
834{
835 struct dpif_backer *backer;
836 char *devname;
837 int error;
838
839 backer = shash_find_data(&all_dpif_backers, type);
840 if (!backer) {
841 /* This is not necessarily a problem, since backers are only
842 * created on demand. */
843 return 0;
844 }
845
846 dpif_run(backer->dpif);
847
2cc3c58e
EJ
848 if (backer->need_revalidate
849 || !tag_set_is_empty(&backer->revalidate_set)) {
850 struct tag_set revalidate_set = backer->revalidate_set;
851 bool need_revalidate = backer->need_revalidate;
852 struct ofproto_dpif *ofproto;
853
854 switch (backer->need_revalidate) {
855 case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
856 case REV_STP: COVERAGE_INC(rev_stp); break;
857 case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
858 case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
859 case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
860 }
861
8f73d537
EJ
862 if (backer->need_revalidate) {
863 /* Clear the drop_keys in case we should now be accepting some
864 * formerly dropped flows. */
865 drop_key_clear(backer);
866 }
867
f728af2e
BP
868 /* Clear the revalidation flags. */
869 tag_set_init(&backer->revalidate_set);
870 backer->need_revalidate = 0;
871
2cc3c58e
EJ
872 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
873 struct facet *facet;
874
875 if (ofproto->backer != backer) {
876 continue;
877 }
878
2cc3c58e
EJ
879 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
880 if (need_revalidate
881 || tag_set_intersects(&revalidate_set, facet->tags)) {
882 facet_revalidate(facet);
883 }
884 }
885 }
2cc3c58e
EJ
886 }
887
acf60855
JP
888 if (timer_expired(&backer->next_expiration)) {
889 int delay = expire(backer);
890 timer_set_duration(&backer->next_expiration, delay);
891 }
892
893 /* Check for port changes in the dpif. */
894 while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
476cb42a 895 struct ofproto_dpif *ofproto;
acf60855
JP
896 struct dpif_port port;
897
898 /* Don't report on the datapath's device. */
899 if (!strcmp(devname, dpif_base_name(backer->dpif))) {
c83b89ab 900 goto next;
acf60855
JP
901 }
902
b9ad7294
EJ
903 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
904 &all_ofproto_dpifs) {
905 if (sset_contains(&ofproto->backer->tnl_backers, devname)) {
906 goto next;
907 }
908 }
909
476cb42a 910 ofproto = lookup_ofproto_dpif_by_port_name(devname);
acf60855
JP
911 if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
912 /* The port was removed. If we know the datapath,
913 * report it through poll_set(). If we don't, it may be
914 * notifying us of a removal we initiated, so ignore it.
915 * If there's a pending ENOBUFS, let it stand, since
916 * everything will be reevaluated. */
917 if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
918 sset_add(&ofproto->port_poll_set, devname);
919 ofproto->port_poll_errno = 0;
920 }
acf60855
JP
921 } else if (!ofproto) {
922 /* The port was added, but we don't know with which
923 * ofproto we should associate it. Delete it. */
924 dpif_port_del(backer->dpif, port.port_no);
925 }
5b5e6a4c 926 dpif_port_destroy(&port);
acf60855 927
c83b89ab 928 next:
acf60855
JP
929 free(devname);
930 }
931
932 if (error != EAGAIN) {
933 struct ofproto_dpif *ofproto;
934
935 /* There was some sort of error, so propagate it to all
936 * ofprotos that use this backer. */
937 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
938 &all_ofproto_dpifs) {
939 if (ofproto->backer == backer) {
940 sset_clear(&ofproto->port_poll_set);
941 ofproto->port_poll_errno = error;
942 }
943 }
944 }
945
946 return 0;
947}
948
949static int
950type_run_fast(const char *type)
951{
952 struct dpif_backer *backer;
953 unsigned int work;
954
955 backer = shash_find_data(&all_dpif_backers, type);
956 if (!backer) {
957 /* This is not necessarily a problem, since backers are only
958 * created on demand. */
959 return 0;
960 }
961
962 /* Handle one or more batches of upcalls, until there's nothing left to do
963 * or until we do a fixed total amount of work.
964 *
965 * We do work in batches because it can be much cheaper to set up a number
966 * of flows and fire off their patches all at once. We do multiple batches
967 * because in some cases handling a packet can cause another packet to be
968 * queued almost immediately as part of the return flow. Both
969 * optimizations can make major improvements on some benchmarks and
970 * presumably for real traffic as well. */
971 work = 0;
972 while (work < FLOW_MISS_MAX_BATCH) {
973 int retval = handle_upcalls(backer, FLOW_MISS_MAX_BATCH - work);
974 if (retval <= 0) {
975 return -retval;
976 }
977 work += retval;
978 }
979
980 return 0;
981}
982
983static void
984type_wait(const char *type)
985{
986 struct dpif_backer *backer;
987
988 backer = shash_find_data(&all_dpif_backers, type);
989 if (!backer) {
990 /* This is not necessarily a problem, since backers are only
991 * created on demand. */
992 return;
993 }
994
995 timer_wait(&backer->next_expiration);
996}
997\f
abe529af
BP
998/* Basic life-cycle. */
999
c57b2226
BP
1000static int add_internal_flows(struct ofproto_dpif *);
1001
abe529af
BP
1002static struct ofproto *
1003alloc(void)
1004{
1005 struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
1006 return &ofproto->up;
1007}
1008
1009static void
1010dealloc(struct ofproto *ofproto_)
1011{
1012 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1013 free(ofproto);
1014}
1015
acf60855
JP
1016static void
1017close_dpif_backer(struct dpif_backer *backer)
1018{
1019 struct shash_node *node;
1020
cb22974d 1021 ovs_assert(backer->refcount > 0);
acf60855
JP
1022
1023 if (--backer->refcount) {
1024 return;
1025 }
1026
8f73d537
EJ
1027 drop_key_clear(backer);
1028 hmap_destroy(&backer->drop_keys);
1029
b9ad7294 1030 sset_destroy(&backer->tnl_backers);
acf60855
JP
1031 hmap_destroy(&backer->odp_to_ofport_map);
1032 node = shash_find(&all_dpif_backers, backer->type);
1033 free(backer->type);
1034 shash_delete(&all_dpif_backers, node);
1035 dpif_close(backer->dpif);
1036
1037 free(backer);
1038}
1039
1040/* Datapath port slated for removal from datapath. */
1041struct odp_garbage {
1042 struct list list_node;
1043 uint32_t odp_port;
1044};
1045
1046static int
1047open_dpif_backer(const char *type, struct dpif_backer **backerp)
1048{
1049 struct dpif_backer *backer;
1050 struct dpif_port_dump port_dump;
1051 struct dpif_port port;
1052 struct shash_node *node;
1053 struct list garbage_list;
1054 struct odp_garbage *garbage, *next;
1055 struct sset names;
1056 char *backer_name;
1057 const char *name;
1058 int error;
1059
1060 backer = shash_find_data(&all_dpif_backers, type);
1061 if (backer) {
1062 backer->refcount++;
1063 *backerp = backer;
1064 return 0;
1065 }
1066
1067 backer_name = xasprintf("ovs-%s", type);
1068
1069 /* Remove any existing datapaths, since we assume we're the only
1070 * userspace controlling the datapath. */
1071 sset_init(&names);
1072 dp_enumerate_names(type, &names);
1073 SSET_FOR_EACH(name, &names) {
1074 struct dpif *old_dpif;
1075
1076 /* Don't remove our backer if it exists. */
1077 if (!strcmp(name, backer_name)) {
1078 continue;
1079 }
1080
1081 if (dpif_open(name, type, &old_dpif)) {
1082 VLOG_WARN("couldn't open old datapath %s to remove it", name);
1083 } else {
1084 dpif_delete(old_dpif);
1085 dpif_close(old_dpif);
1086 }
1087 }
1088 sset_destroy(&names);
1089
1090 backer = xmalloc(sizeof *backer);
1091
1092 error = dpif_create_and_open(backer_name, type, &backer->dpif);
1093 free(backer_name);
1094 if (error) {
1095 VLOG_ERR("failed to open datapath of type %s: %s", type,
1096 strerror(error));
4c1b1289 1097 free(backer);
acf60855
JP
1098 return error;
1099 }
1100
1101 backer->type = xstrdup(type);
1102 backer->refcount = 1;
1103 hmap_init(&backer->odp_to_ofport_map);
8f73d537 1104 hmap_init(&backer->drop_keys);
acf60855 1105 timer_set_duration(&backer->next_expiration, 1000);
2cc3c58e 1106 backer->need_revalidate = 0;
b9ad7294 1107 sset_init(&backer->tnl_backers);
2cc3c58e 1108 tag_set_init(&backer->revalidate_set);
acf60855
JP
1109 *backerp = backer;
1110
1111 dpif_flow_flush(backer->dpif);
1112
1113 /* Loop through the ports already on the datapath and remove any
1114 * that we don't need anymore. */
1115 list_init(&garbage_list);
1116 dpif_port_dump_start(&port_dump, backer->dpif);
1117 while (dpif_port_dump_next(&port_dump, &port)) {
1118 node = shash_find(&init_ofp_ports, port.name);
1119 if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
1120 garbage = xmalloc(sizeof *garbage);
1121 garbage->odp_port = port.port_no;
1122 list_push_front(&garbage_list, &garbage->list_node);
1123 }
1124 }
1125 dpif_port_dump_done(&port_dump);
1126
1127 LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
1128 dpif_port_del(backer->dpif, garbage->odp_port);
1129 list_remove(&garbage->list_node);
1130 free(garbage);
1131 }
1132
1133 shash_add(&all_dpif_backers, type, backer);
1134
1135 error = dpif_recv_set(backer->dpif, true);
1136 if (error) {
1137 VLOG_ERR("failed to listen on datapath of type %s: %s",
1138 type, strerror(error));
1139 close_dpif_backer(backer);
1140 return error;
1141 }
1142
1143 return error;
1144}
1145
abe529af 1146static int
0f5f95a9 1147construct(struct ofproto *ofproto_)
abe529af
BP
1148{
1149 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855 1150 struct shash_node *node, *next;
91858960 1151 int max_ports;
abe529af
BP
1152 int error;
1153 int i;
1154
acf60855 1155 error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
abe529af 1156 if (error) {
abe529af
BP
1157 return error;
1158 }
1159
acf60855 1160 max_ports = dpif_get_max_ports(ofproto->backer->dpif);
91858960
BP
1161 ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));
1162
6c1491fb 1163 ofproto->n_matches = 0;
abe529af 1164
abe529af
BP
1165 ofproto->netflow = NULL;
1166 ofproto->sflow = NULL;
21f7563c 1167 ofproto->stp = NULL;
abe529af 1168 hmap_init(&ofproto->bundles);
e764773c 1169 ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
abe529af
BP
1170 for (i = 0; i < MAX_MIRRORS; i++) {
1171 ofproto->mirrors[i] = NULL;
1172 }
1173 ofproto->has_bonded_bundles = false;
1174
abe529af 1175 hmap_init(&ofproto->facets);
b0f7b9b5 1176 hmap_init(&ofproto->subfacets);
9d6ac44e 1177 ofproto->governor = NULL;
54a9cbc9
BP
1178
1179 for (i = 0; i < N_TABLES; i++) {
1180 struct table_dpif *table = &ofproto->tables[i];
1181
1182 table->catchall_table = NULL;
1183 table->other_table = NULL;
1184 table->basis = random_uint32();
1185 }
abe529af 1186
7ee20df1
BP
1187 list_init(&ofproto->completions);
1188
abe529af
BP
1189 ofproto_dpif_unixctl_init();
1190
ccb7c863 1191 ofproto->has_mirrors = false;
daff3353
EJ
1192 ofproto->has_bundle_action = false;
1193
52a90c29
BP
1194 hmap_init(&ofproto->vlandev_map);
1195 hmap_init(&ofproto->realdev_vid_map);
1196
acf60855 1197 sset_init(&ofproto->ports);
0a740f48 1198 sset_init(&ofproto->ghost_ports);
acf60855
JP
1199 sset_init(&ofproto->port_poll_set);
1200 ofproto->port_poll_errno = 0;
1201
1202 SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
4f9e08a5 1203 struct iface_hint *iface_hint = node->data;
acf60855
JP
1204
1205 if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
1206 /* Check if the datapath already has this port. */
1207 if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
1208 sset_add(&ofproto->ports, node->name);
1209 }
1210
1211 free(iface_hint->br_name);
1212 free(iface_hint->br_type);
4f9e08a5 1213 free(iface_hint);
acf60855
JP
1214 shash_delete(&init_ofp_ports, node);
1215 }
1216 }
e1b1d06a 1217
b44a10b7
BP
1218 hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
1219 hash_string(ofproto->up.name, 0));
6527c598 1220 memset(&ofproto->stats, 0, sizeof ofproto->stats);
0f5f95a9
BP
1221
1222 ofproto_init_tables(ofproto_, N_TABLES);
c57b2226
BP
1223 error = add_internal_flows(ofproto);
1224 ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
1225
1226 return error;
1227}
1228
1229static int
1230add_internal_flow(struct ofproto_dpif *ofproto, int id,
f25d0cf3 1231 const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
c57b2226
BP
1232{
1233 struct ofputil_flow_mod fm;
1234 int error;
1235
81a76618
BP
1236 match_init_catchall(&fm.match);
1237 fm.priority = 0;
1238 match_set_reg(&fm.match, 0, id);
623e1caf 1239 fm.new_cookie = htonll(0);
c57b2226
BP
1240 fm.cookie = htonll(0);
1241 fm.cookie_mask = htonll(0);
1242 fm.table_id = TBL_INTERNAL;
1243 fm.command = OFPFC_ADD;
1244 fm.idle_timeout = 0;
1245 fm.hard_timeout = 0;
1246 fm.buffer_id = 0;
1247 fm.out_port = 0;
1248 fm.flags = 0;
f25d0cf3
BP
1249 fm.ofpacts = ofpacts->data;
1250 fm.ofpacts_len = ofpacts->size;
c57b2226
BP
1251
1252 error = ofproto_flow_mod(&ofproto->up, &fm);
1253 if (error) {
1254 VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
1255 id, ofperr_to_string(error));
1256 return error;
1257 }
1258
81a76618 1259 *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
cb22974d 1260 ovs_assert(*rulep != NULL);
0f5f95a9 1261
abe529af
BP
1262 return 0;
1263}
1264
c57b2226
BP
1265static int
1266add_internal_flows(struct ofproto_dpif *ofproto)
1267{
f25d0cf3
BP
1268 struct ofpact_controller *controller;
1269 uint64_t ofpacts_stub[128 / 8];
1270 struct ofpbuf ofpacts;
c57b2226
BP
1271 int error;
1272 int id;
1273
f25d0cf3 1274 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
c57b2226
BP
1275 id = 1;
1276
f25d0cf3
BP
1277 controller = ofpact_put_CONTROLLER(&ofpacts);
1278 controller->max_len = UINT16_MAX;
1279 controller->controller_id = 0;
1280 controller->reason = OFPR_NO_MATCH;
1281 ofpact_pad(&ofpacts);
1282
1283 error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
c57b2226
BP
1284 if (error) {
1285 return error;
1286 }
1287
f25d0cf3
BP
1288 ofpbuf_clear(&ofpacts);
1289 error = add_internal_flow(ofproto, id++, &ofpacts,
c57b2226
BP
1290 &ofproto->no_packet_in_rule);
1291 return error;
1292}
1293
7ee20df1
BP
1294static void
1295complete_operations(struct ofproto_dpif *ofproto)
1296{
1297 struct dpif_completion *c, *next;
1298
1299 LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
1300 ofoperation_complete(c->op, 0);
1301 list_remove(&c->list_node);
1302 free(c);
1303 }
1304}
1305
abe529af
BP
1306static void
1307destruct(struct ofproto *ofproto_)
1308{
1309 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7ee20df1 1310 struct rule_dpif *rule, *next_rule;
d0918789 1311 struct oftable *table;
abe529af
BP
1312 int i;
1313
b44a10b7 1314 hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
7ee20df1
BP
1315 complete_operations(ofproto);
1316
0697b5c3
BP
1317 OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
1318 struct cls_cursor cursor;
1319
d0918789 1320 cls_cursor_init(&cursor, &table->cls, NULL);
0697b5c3
BP
1321 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
1322 ofproto_rule_destroy(&rule->up);
1323 }
7ee20df1
BP
1324 }
1325
abe529af
BP
1326 for (i = 0; i < MAX_MIRRORS; i++) {
1327 mirror_destroy(ofproto->mirrors[i]);
1328 }
1329
1330 netflow_destroy(ofproto->netflow);
bae473fe 1331 dpif_sflow_destroy(ofproto->sflow);
abe529af
BP
1332 hmap_destroy(&ofproto->bundles);
1333 mac_learning_destroy(ofproto->ml);
1334
1335 hmap_destroy(&ofproto->facets);
b0f7b9b5 1336 hmap_destroy(&ofproto->subfacets);
9d6ac44e 1337 governor_destroy(ofproto->governor);
abe529af 1338
52a90c29
BP
1339 hmap_destroy(&ofproto->vlandev_map);
1340 hmap_destroy(&ofproto->realdev_vid_map);
1341
acf60855 1342 sset_destroy(&ofproto->ports);
0a740f48 1343 sset_destroy(&ofproto->ghost_ports);
acf60855 1344 sset_destroy(&ofproto->port_poll_set);
e1b1d06a 1345
acf60855 1346 close_dpif_backer(ofproto->backer);
abe529af
BP
1347}
1348
1349static int
5fcc0d00 1350run_fast(struct ofproto *ofproto_)
abe529af
BP
1351{
1352 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
0aa66d6e 1353 struct ofport_dpif *ofport;
abe529af 1354
0aa66d6e
EJ
1355 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1356 port_run_fast(ofport);
1357 }
1358
5fcc0d00
BP
1359 return 0;
1360}
1361
1362static int
1363run(struct ofproto *ofproto_)
1364{
1365 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1366 struct ofport_dpif *ofport;
1367 struct ofbundle *bundle;
1368 int error;
1369
1370 if (!clogged) {
1371 complete_operations(ofproto);
1372 }
5fcc0d00
BP
1373
1374 error = run_fast(ofproto_);
1375 if (error) {
1376 return error;
abe529af
BP
1377 }
1378
abe529af 1379 if (ofproto->netflow) {
6fca1ffb
BP
1380 if (netflow_run(ofproto->netflow)) {
1381 send_netflow_active_timeouts(ofproto);
1382 }
abe529af
BP
1383 }
1384 if (ofproto->sflow) {
bae473fe 1385 dpif_sflow_run(ofproto->sflow);
abe529af
BP
1386 }
1387
1388 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1389 port_run(ofport);
1390 }
1391 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1392 bundle_run(bundle);
1393 }
1394
21f7563c 1395 stp_run(ofproto);
2cc3c58e 1396 mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
abe529af 1397
6814e51f 1398 /* Check the consistency of a random facet, to aid debugging. */
2cc3c58e
EJ
1399 if (!hmap_is_empty(&ofproto->facets)
1400 && !ofproto->backer->need_revalidate) {
6814e51f
BP
1401 struct facet *facet;
1402
1403 facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
1404 struct facet, hmap_node);
2cc3c58e
EJ
1405 if (!tag_set_intersects(&ofproto->backer->revalidate_set,
1406 facet->tags)) {
6814e51f 1407 if (!facet_check_consistency(facet)) {
2cc3c58e 1408 ofproto->backer->need_revalidate = REV_INCONSISTENCY;
6814e51f
BP
1409 }
1410 }
1411 }
1412
9d6ac44e
BP
1413 if (ofproto->governor) {
1414 size_t n_subfacets;
1415
1416 governor_run(ofproto->governor);
1417
1418 /* If the governor has shrunk to its minimum size and the number of
1419 * subfacets has dwindled, then drop the governor entirely.
1420 *
1421 * For hysteresis, the number of subfacets to drop the governor is
1422 * smaller than the number needed to trigger its creation. */
1423 n_subfacets = hmap_count(&ofproto->subfacets);
1424 if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
1425 && governor_is_idle(ofproto->governor)) {
1426 governor_destroy(ofproto->governor);
1427 ofproto->governor = NULL;
1428 }
1429 }
1430
abe529af
BP
1431 return 0;
1432}
1433
1434static void
1435wait(struct ofproto *ofproto_)
1436{
1437 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1438 struct ofport_dpif *ofport;
1439 struct ofbundle *bundle;
1440
7ee20df1
BP
1441 if (!clogged && !list_is_empty(&ofproto->completions)) {
1442 poll_immediate_wake();
1443 }
1444
acf60855
JP
1445 dpif_wait(ofproto->backer->dpif);
1446 dpif_recv_wait(ofproto->backer->dpif);
abe529af 1447 if (ofproto->sflow) {
bae473fe 1448 dpif_sflow_wait(ofproto->sflow);
abe529af 1449 }
2cc3c58e 1450 if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
abe529af
BP
1451 poll_immediate_wake();
1452 }
1453 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1454 port_wait(ofport);
1455 }
1456 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1457 bundle_wait(bundle);
1458 }
6fca1ffb
BP
1459 if (ofproto->netflow) {
1460 netflow_wait(ofproto->netflow);
1461 }
1c313b88 1462 mac_learning_wait(ofproto->ml);
21f7563c 1463 stp_wait(ofproto);
2cc3c58e 1464 if (ofproto->backer->need_revalidate) {
abe529af
BP
1465 /* Shouldn't happen, but if it does just go around again. */
1466 VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
1467 poll_immediate_wake();
abe529af 1468 }
9d6ac44e
BP
1469 if (ofproto->governor) {
1470 governor_wait(ofproto->governor);
1471 }
abe529af
BP
1472}
1473
0d085684
BP
1474static void
1475get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
1476{
1477 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1478
1479 simap_increase(usage, "facets", hmap_count(&ofproto->facets));
1480 simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
1481}
1482
abe529af
BP
1483static void
1484flush(struct ofproto *ofproto_)
1485{
1486 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855
JP
1487 struct subfacet *subfacet, *next_subfacet;
1488 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
1489 int n_batch;
b0f7b9b5 1490
acf60855
JP
1491 n_batch = 0;
1492 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
1493 &ofproto->subfacets) {
1494 if (subfacet->path != SF_NOT_INSTALLED) {
1495 batch[n_batch++] = subfacet;
1496 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
1497 subfacet_destroy_batch(ofproto, batch, n_batch);
1498 n_batch = 0;
1499 }
1500 } else {
1501 subfacet_destroy(subfacet);
b0f7b9b5 1502 }
abe529af 1503 }
acf60855
JP
1504
1505 if (n_batch > 0) {
1506 subfacet_destroy_batch(ofproto, batch, n_batch);
1507 }
abe529af
BP
1508}
1509
6c1491fb
BP
1510static void
1511get_features(struct ofproto *ofproto_ OVS_UNUSED,
9e1fd49b 1512 bool *arp_match_ip, enum ofputil_action_bitmap *actions)
6c1491fb
BP
1513{
1514 *arp_match_ip = true;
9e1fd49b
BP
1515 *actions = (OFPUTIL_A_OUTPUT |
1516 OFPUTIL_A_SET_VLAN_VID |
1517 OFPUTIL_A_SET_VLAN_PCP |
1518 OFPUTIL_A_STRIP_VLAN |
1519 OFPUTIL_A_SET_DL_SRC |
1520 OFPUTIL_A_SET_DL_DST |
1521 OFPUTIL_A_SET_NW_SRC |
1522 OFPUTIL_A_SET_NW_DST |
1523 OFPUTIL_A_SET_NW_TOS |
1524 OFPUTIL_A_SET_TP_SRC |
1525 OFPUTIL_A_SET_TP_DST |
1526 OFPUTIL_A_ENQUEUE);
6c1491fb
BP
1527}
1528
1529static void
307975da 1530get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
6c1491fb
BP
1531{
1532 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
a8d9304d 1533 struct dpif_dp_stats s;
6c1491fb
BP
1534
1535 strcpy(ots->name, "classifier");
1536
acf60855
JP
1537 dpif_get_dp_stats(ofproto->backer->dpif, &s);
1538
307975da
SH
1539 ots->lookup_count = htonll(s.n_hit + s.n_missed);
1540 ots->matched_count = htonll(s.n_hit + ofproto->n_matches);
6c1491fb
BP
1541}
1542
abe529af
BP
1543static struct ofport *
1544port_alloc(void)
1545{
1546 struct ofport_dpif *port = xmalloc(sizeof *port);
1547 return &port->up;
1548}
1549
1550static void
1551port_dealloc(struct ofport *port_)
1552{
1553 struct ofport_dpif *port = ofport_dpif_cast(port_);
1554 free(port);
1555}
1556
1557static int
1558port_construct(struct ofport *port_)
1559{
1560 struct ofport_dpif *port = ofport_dpif_cast(port_);
1561 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
b9ad7294 1562 const struct netdev *netdev = port->up.netdev;
e1b1d06a
JP
1563 struct dpif_port dpif_port;
1564 int error;
abe529af 1565
2cc3c58e 1566 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
1567 port->bundle = NULL;
1568 port->cfm = NULL;
1569 port->tag = tag_create_random();
d5ffa7f2 1570 port->may_enable = true;
21f7563c
JP
1571 port->stp_port = NULL;
1572 port->stp_state = STP_DISABLED;
b9ad7294 1573 port->tnl_port = NULL;
8b36f51e 1574 hmap_init(&port->priorities);
52a90c29
BP
1575 port->realdev_ofp_port = 0;
1576 port->vlandev_vid = 0;
b9ad7294 1577 port->carrier_seq = netdev_get_carrier_resets(netdev);
abe529af 1578
b9ad7294 1579 if (netdev_vport_is_patch(netdev)) {
0a740f48
EJ
1580 /* XXX By bailing out here, we don't do required sFlow work. */
1581 port->odp_port = OVSP_NONE;
1582 return 0;
1583 }
1584
acf60855 1585 error = dpif_port_query_by_name(ofproto->backer->dpif,
b9ad7294 1586 netdev_vport_get_dpif_port(netdev),
e1b1d06a
JP
1587 &dpif_port);
1588 if (error) {
1589 return error;
1590 }
1591
1592 port->odp_port = dpif_port.port_no;
1593
b9ad7294
EJ
1594 if (netdev_get_tunnel_config(netdev)) {
1595 port->tnl_port = tnl_port_add(&port->up, port->odp_port);
1596 } else {
1597 /* Sanity-check that a mapping doesn't already exist. This
1598 * shouldn't happen for non-tunnel ports. */
1599 if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
1600 VLOG_ERR("port %s already has an OpenFlow port number",
1601 dpif_port.name);
da78d43d 1602 dpif_port_destroy(&dpif_port);
b9ad7294
EJ
1603 return EBUSY;
1604 }
e1b1d06a 1605
b9ad7294
EJ
1606 hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
1607 hash_int(port->odp_port, 0));
1608 }
da78d43d 1609 dpif_port_destroy(&dpif_port);
e1b1d06a 1610
abe529af 1611 if (ofproto->sflow) {
e1b1d06a 1612 dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
abe529af
BP
1613 }
1614
1615 return 0;
1616}
1617
1618static void
1619port_destruct(struct ofport *port_)
1620{
1621 struct ofport_dpif *port = ofport_dpif_cast(port_);
1622 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
b9ad7294 1623 const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
02f8d646 1624 const char *devname = netdev_get_name(port->up.netdev);
abe529af 1625
b9ad7294
EJ
1626 if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)
1627 && may_dpif_port_del(port)) {
acf60855
JP
1628 /* The underlying device is still there, so delete it. This
1629 * happens when the ofproto is being destroyed, since the caller
1630 * assumes that removal of attached ports will happen as part of
1631 * destruction. */
1632 dpif_port_del(ofproto->backer->dpif, port->odp_port);
b9ad7294 1633 sset_find_and_delete(&ofproto->backer->tnl_backers, dp_port_name);
acf60855
JP
1634 }
1635
b9ad7294 1636 if (port->odp_port != OVSP_NONE && !port->tnl_port) {
0a740f48
EJ
1637 hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
1638 }
1639
b9ad7294 1640 tnl_port_del(port->tnl_port);
02f8d646 1641 sset_find_and_delete(&ofproto->ports, devname);
0a740f48 1642 sset_find_and_delete(&ofproto->ghost_ports, devname);
2cc3c58e 1643 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af 1644 bundle_remove(port_);
a5610457 1645 set_cfm(port_, NULL);
abe529af 1646 if (ofproto->sflow) {
bae473fe 1647 dpif_sflow_del_port(ofproto->sflow, port->odp_port);
abe529af 1648 }
8b36f51e
EJ
1649
1650 ofport_clear_priorities(port);
1651 hmap_destroy(&port->priorities);
abe529af
BP
1652}
1653
1654static void
1655port_modified(struct ofport *port_)
1656{
1657 struct ofport_dpif *port = ofport_dpif_cast(port_);
1658
1659 if (port->bundle && port->bundle->bond) {
1660 bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
1661 }
1662}
1663
1664static void
9e1fd49b 1665port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
abe529af
BP
1666{
1667 struct ofport_dpif *port = ofport_dpif_cast(port_);
1668 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
9e1fd49b 1669 enum ofputil_port_config changed = old_config ^ port->up.pp.config;
abe529af 1670
9e1fd49b 1671 if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
c57b2226
BP
1672 OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
1673 OFPUTIL_PC_NO_PACKET_IN)) {
2cc3c58e 1674 ofproto->backer->need_revalidate = REV_RECONFIGURE;
7bde8dd8 1675
9e1fd49b 1676 if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
7bde8dd8
JP
1677 bundle_update(port->bundle);
1678 }
abe529af
BP
1679 }
1680}
1681
1682static int
1683set_sflow(struct ofproto *ofproto_,
1684 const struct ofproto_sflow_options *sflow_options)
1685{
1686 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
bae473fe 1687 struct dpif_sflow *ds = ofproto->sflow;
6ff686f2 1688
abe529af 1689 if (sflow_options) {
bae473fe 1690 if (!ds) {
abe529af
BP
1691 struct ofport_dpif *ofport;
1692
4213f19d 1693 ds = ofproto->sflow = dpif_sflow_create();
abe529af 1694 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
e1b1d06a 1695 dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
abe529af 1696 }
2cc3c58e 1697 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af 1698 }
bae473fe 1699 dpif_sflow_set_options(ds, sflow_options);
abe529af 1700 } else {
6ff686f2
PS
1701 if (ds) {
1702 dpif_sflow_destroy(ds);
2cc3c58e 1703 ofproto->backer->need_revalidate = REV_RECONFIGURE;
6ff686f2
PS
1704 ofproto->sflow = NULL;
1705 }
abe529af
BP
1706 }
1707 return 0;
1708}
1709
1710static int
a5610457 1711set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
abe529af
BP
1712{
1713 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1714 int error;
1715
a5610457 1716 if (!s) {
abe529af
BP
1717 error = 0;
1718 } else {
1719 if (!ofport->cfm) {
8c977421
EJ
1720 struct ofproto_dpif *ofproto;
1721
1722 ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2cc3c58e 1723 ofproto->backer->need_revalidate = REV_RECONFIGURE;
6f629657 1724 ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
abe529af
BP
1725 }
1726
a5610457 1727 if (cfm_configure(ofport->cfm, s)) {
abe529af
BP
1728 return 0;
1729 }
1730
1731 error = EINVAL;
1732 }
1733 cfm_destroy(ofport->cfm);
1734 ofport->cfm = NULL;
1735 return error;
1736}
1737
1738static int
a5610457 1739get_cfm_fault(const struct ofport *ofport_)
abe529af
BP
1740{
1741 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
a5610457
EJ
1742
1743 return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
abe529af 1744}
1de11730 1745
1c0333b6
EJ
1746static int
1747get_cfm_opup(const struct ofport *ofport_)
1748{
1749 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1750
1751 return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
1752}
1753
1de11730
EJ
1754static int
1755get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
1756 size_t *n_rmps)
1757{
1758 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1759
1760 if (ofport->cfm) {
1761 cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
1762 return 0;
1763 } else {
1764 return -1;
1765 }
1766}
3967a833
MM
1767
1768static int
1769get_cfm_health(const struct ofport *ofport_)
1770{
1771 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1772
1773 return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
1774}
abe529af 1775\f
21f7563c
JP
1776/* Spanning Tree. */
1777
1778static void
1779send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
1780{
1781 struct ofproto_dpif *ofproto = ofproto_;
1782 struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
1783 struct ofport_dpif *ofport;
1784
1785 ofport = stp_port_get_aux(sp);
1786 if (!ofport) {
1787 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
1788 ofproto->up.name, port_num);
1789 } else {
1790 struct eth_header *eth = pkt->l2;
1791
1792 netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
1793 if (eth_addr_is_zero(eth->eth_src)) {
1794 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
1795 "with unknown MAC", ofproto->up.name, port_num);
1796 } else {
97d6520b 1797 send_packet(ofport, pkt);
21f7563c
JP
1798 }
1799 }
1800 ofpbuf_delete(pkt);
1801}
1802
1803/* Configures STP on 'ofproto_' using the settings defined in 's'. */
1804static int
1805set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
1806{
1807 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1808
1809 /* Only revalidate flows if the configuration changed. */
1810 if (!s != !ofproto->stp) {
2cc3c58e 1811 ofproto->backer->need_revalidate = REV_RECONFIGURE;
21f7563c
JP
1812 }
1813
1814 if (s) {
1815 if (!ofproto->stp) {
1816 ofproto->stp = stp_create(ofproto_->name, s->system_id,
1817 send_bpdu_cb, ofproto);
1818 ofproto->stp_last_tick = time_msec();
1819 }
1820
1821 stp_set_bridge_id(ofproto->stp, s->system_id);
1822 stp_set_bridge_priority(ofproto->stp, s->priority);
1823 stp_set_hello_time(ofproto->stp, s->hello_time);
1824 stp_set_max_age(ofproto->stp, s->max_age);
1825 stp_set_forward_delay(ofproto->stp, s->fwd_delay);
1826 } else {
851bf71d
EJ
1827 struct ofport *ofport;
1828
1829 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
1830 set_stp_port(ofport, NULL);
1831 }
1832
21f7563c
JP
1833 stp_destroy(ofproto->stp);
1834 ofproto->stp = NULL;
1835 }
1836
1837 return 0;
1838}
1839
1840static int
1841get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
1842{
1843 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1844
1845 if (ofproto->stp) {
1846 s->enabled = true;
1847 s->bridge_id = stp_get_bridge_id(ofproto->stp);
1848 s->designated_root = stp_get_designated_root(ofproto->stp);
1849 s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
1850 } else {
1851 s->enabled = false;
1852 }
1853
1854 return 0;
1855}
1856
1857static void
1858update_stp_port_state(struct ofport_dpif *ofport)
1859{
1860 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1861 enum stp_state state;
1862
1863 /* Figure out new state. */
1864 state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
1865 : STP_DISABLED;
1866
1867 /* Update state. */
1868 if (ofport->stp_state != state) {
9e1fd49b 1869 enum ofputil_port_state of_state;
21f7563c
JP
1870 bool fwd_change;
1871
1872 VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
1873 netdev_get_name(ofport->up.netdev),
1874 stp_state_name(ofport->stp_state),
1875 stp_state_name(state));
1876 if (stp_learn_in_state(ofport->stp_state)
1877 != stp_learn_in_state(state)) {
1878 /* xxx Learning action flows should also be flushed. */
2cc3c58e
EJ
1879 mac_learning_flush(ofproto->ml,
1880 &ofproto->backer->revalidate_set);
21f7563c
JP
1881 }
1882 fwd_change = stp_forward_in_state(ofport->stp_state)
1883 != stp_forward_in_state(state);
1884
2cc3c58e 1885 ofproto->backer->need_revalidate = REV_STP;
21f7563c
JP
1886 ofport->stp_state = state;
1887 ofport->stp_state_entered = time_msec();
1888
b308140a 1889 if (fwd_change && ofport->bundle) {
21f7563c
JP
1890 bundle_update(ofport->bundle);
1891 }
1892
1893 /* Update the STP state bits in the OpenFlow port description. */
9e1fd49b
BP
1894 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
1895 of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
1896 : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
1897 : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
1898 : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
1899 : 0);
21f7563c
JP
1900 ofproto_port_set_state(&ofport->up, of_state);
1901 }
1902}
1903
1904/* Configures STP on 'ofport_' using the settings defined in 's'. The
1905 * caller is responsible for assigning STP port numbers and ensuring
1906 * there are no duplicates. */
1907static int
1908set_stp_port(struct ofport *ofport_,
1909 const struct ofproto_port_stp_settings *s)
1910{
1911 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1912 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1913 struct stp_port *sp = ofport->stp_port;
1914
1915 if (!s || !s->enable) {
1916 if (sp) {
1917 ofport->stp_port = NULL;
1918 stp_port_disable(sp);
ecd12731 1919 update_stp_port_state(ofport);
21f7563c
JP
1920 }
1921 return 0;
1922 } else if (sp && stp_port_no(sp) != s->port_num
1923 && ofport == stp_port_get_aux(sp)) {
1924 /* The port-id changed, so disable the old one if it's not
1925 * already in use by another port. */
1926 stp_port_disable(sp);
1927 }
1928
1929 sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
1930 stp_port_enable(sp);
1931
1932 stp_port_set_aux(sp, ofport);
1933 stp_port_set_priority(sp, s->priority);
1934 stp_port_set_path_cost(sp, s->path_cost);
1935
1936 update_stp_port_state(ofport);
1937
1938 return 0;
1939}
1940
1941static int
1942get_stp_port_status(struct ofport *ofport_,
1943 struct ofproto_port_stp_status *s)
1944{
1945 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1946 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1947 struct stp_port *sp = ofport->stp_port;
1948
1949 if (!ofproto->stp || !sp) {
1950 s->enabled = false;
1951 return 0;
1952 }
1953
1954 s->enabled = true;
1955 s->port_id = stp_port_get_id(sp);
1956 s->state = stp_port_get_state(sp);
1957 s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
1958 s->role = stp_port_get_role(sp);
80740385 1959 stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
21f7563c
JP
1960
1961 return 0;
1962}
1963
1964static void
1965stp_run(struct ofproto_dpif *ofproto)
1966{
1967 if (ofproto->stp) {
1968 long long int now = time_msec();
1969 long long int elapsed = now - ofproto->stp_last_tick;
1970 struct stp_port *sp;
1971
1972 if (elapsed > 0) {
1973 stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
1974 ofproto->stp_last_tick = now;
1975 }
1976 while (stp_get_changed_port(ofproto->stp, &sp)) {
1977 struct ofport_dpif *ofport = stp_port_get_aux(sp);
1978
1979 if (ofport) {
1980 update_stp_port_state(ofport);
1981 }
1982 }
6ae50723
EJ
1983
1984 if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
2cc3c58e 1985 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
6ae50723 1986 }
21f7563c
JP
1987 }
1988}
1989
1990static void
1991stp_wait(struct ofproto_dpif *ofproto)
1992{
1993 if (ofproto->stp) {
1994 poll_timer_wait(1000);
1995 }
1996}
1997
1998/* Returns true if STP should process 'flow'. */
1999static bool
2000stp_should_process_flow(const struct flow *flow)
2001{
2002 return eth_addr_equals(flow->dl_dst, eth_addr_stp);
2003}
2004
2005static void
2006stp_process_packet(const struct ofport_dpif *ofport,
2007 const struct ofpbuf *packet)
2008{
2009 struct ofpbuf payload = *packet;
2010 struct eth_header *eth = payload.data;
2011 struct stp_port *sp = ofport->stp_port;
2012
2013 /* Sink packets on ports that have STP disabled when the bridge has
2014 * STP enabled. */
2015 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
2016 return;
2017 }
2018
2019 /* Trim off padding on payload. */
c573540b
BP
2020 if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
2021 payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
21f7563c
JP
2022 }
2023
2024 if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
2025 stp_received_bpdu(sp, payload.data, payload.size);
2026 }
2027}
2028\f
8b36f51e
EJ
2029static struct priority_to_dscp *
2030get_priority(const struct ofport_dpif *ofport, uint32_t priority)
2031{
2032 struct priority_to_dscp *pdscp;
2033 uint32_t hash;
2034
2035 hash = hash_int(priority, 0);
2036 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
2037 if (pdscp->priority == priority) {
2038 return pdscp;
2039 }
2040 }
2041 return NULL;
2042}
2043
2044static void
2045ofport_clear_priorities(struct ofport_dpif *ofport)
2046{
2047 struct priority_to_dscp *pdscp, *next;
2048
2049 HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
2050 hmap_remove(&ofport->priorities, &pdscp->hmap_node);
2051 free(pdscp);
2052 }
2053}
2054
2055static int
2056set_queues(struct ofport *ofport_,
2057 const struct ofproto_port_queue *qdscp_list,
2058 size_t n_qdscp)
2059{
2060 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2061 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2062 struct hmap new = HMAP_INITIALIZER(&new);
2063 size_t i;
2064
2065 for (i = 0; i < n_qdscp; i++) {
2066 struct priority_to_dscp *pdscp;
2067 uint32_t priority;
2068 uint8_t dscp;
2069
2070 dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
acf60855 2071 if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
8b36f51e
EJ
2072 &priority)) {
2073 continue;
2074 }
2075
2076 pdscp = get_priority(ofport, priority);
2077 if (pdscp) {
2078 hmap_remove(&ofport->priorities, &pdscp->hmap_node);
2079 } else {
2080 pdscp = xmalloc(sizeof *pdscp);
2081 pdscp->priority = priority;
2082 pdscp->dscp = dscp;
2cc3c58e 2083 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8b36f51e
EJ
2084 }
2085
2086 if (pdscp->dscp != dscp) {
2087 pdscp->dscp = dscp;
2cc3c58e 2088 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8b36f51e
EJ
2089 }
2090
2091 hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
2092 }
2093
2094 if (!hmap_is_empty(&ofport->priorities)) {
2095 ofport_clear_priorities(ofport);
2cc3c58e 2096 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8b36f51e
EJ
2097 }
2098
2099 hmap_swap(&new, &ofport->priorities);
2100 hmap_destroy(&new);
2101
2102 return 0;
2103}
2104\f
abe529af
BP
2105/* Bundles. */
2106
b44a10b7
BP
2107/* Expires all MAC learning entries associated with 'bundle' and forces its
2108 * ofproto to revalidate every flow.
2109 *
2110 * Normally MAC learning entries are removed only from the ofproto associated
2111 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
2112 * are removed from every ofproto. When patch ports and SLB bonds are in use
2113 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
2114 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
2115 * with the host from which it migrated. */
abe529af 2116static void
b44a10b7 2117bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
abe529af
BP
2118{
2119 struct ofproto_dpif *ofproto = bundle->ofproto;
2120 struct mac_learning *ml = ofproto->ml;
2121 struct mac_entry *mac, *next_mac;
2122
2cc3c58e 2123 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2124 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2125 if (mac->port.p == bundle) {
b44a10b7
BP
2126 if (all_ofprotos) {
2127 struct ofproto_dpif *o;
2128
2129 HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2130 if (o != ofproto) {
2131 struct mac_entry *e;
2132
2133 e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
2134 NULL);
2135 if (e) {
b44a10b7
BP
2136 mac_learning_expire(o->ml, e);
2137 }
2138 }
2139 }
2140 }
2141
abe529af
BP
2142 mac_learning_expire(ml, mac);
2143 }
2144 }
2145}
2146
2147static struct ofbundle *
2148bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
2149{
2150 struct ofbundle *bundle;
2151
2152 HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
2153 &ofproto->bundles) {
2154 if (bundle->aux == aux) {
2155 return bundle;
2156 }
2157 }
2158 return NULL;
2159}
2160
2161/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
2162 * ones that are found to 'bundles'. */
2163static void
2164bundle_lookup_multiple(struct ofproto_dpif *ofproto,
2165 void **auxes, size_t n_auxes,
2166 struct hmapx *bundles)
2167{
2168 size_t i;
2169
2170 hmapx_init(bundles);
2171 for (i = 0; i < n_auxes; i++) {
2172 struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
2173 if (bundle) {
2174 hmapx_add(bundles, bundle);
2175 }
2176 }
2177}
2178
7bde8dd8
JP
2179static void
2180bundle_update(struct ofbundle *bundle)
2181{
2182 struct ofport_dpif *port;
2183
2184 bundle->floodable = true;
2185 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
9e1fd49b
BP
2186 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2187 || !stp_forward_in_state(port->stp_state)) {
7bde8dd8
JP
2188 bundle->floodable = false;
2189 break;
2190 }
2191 }
2192}
2193
abe529af
BP
2194static void
2195bundle_del_port(struct ofport_dpif *port)
2196{
2197 struct ofbundle *bundle = port->bundle;
2198
2cc3c58e 2199 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
6f77f4ae 2200
abe529af
BP
2201 list_remove(&port->bundle_node);
2202 port->bundle = NULL;
2203
2204 if (bundle->lacp) {
2205 lacp_slave_unregister(bundle->lacp, port);
2206 }
2207 if (bundle->bond) {
2208 bond_slave_unregister(bundle->bond, port);
2209 }
2210
7bde8dd8 2211 bundle_update(bundle);
abe529af
BP
2212}
2213
2214static bool
2215bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
df53d41c 2216 struct lacp_slave_settings *lacp)
abe529af
BP
2217{
2218 struct ofport_dpif *port;
2219
2220 port = get_ofp_port(bundle->ofproto, ofp_port);
2221 if (!port) {
2222 return false;
2223 }
2224
2225 if (port->bundle != bundle) {
2cc3c58e 2226 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2227 if (port->bundle) {
2228 bundle_del_port(port);
2229 }
2230
2231 port->bundle = bundle;
2232 list_push_back(&bundle->ports, &port->bundle_node);
9e1fd49b
BP
2233 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2234 || !stp_forward_in_state(port->stp_state)) {
abe529af
BP
2235 bundle->floodable = false;
2236 }
2237 }
2238 if (lacp) {
2cc3c58e 2239 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2240 lacp_slave_register(bundle->lacp, port, lacp);
2241 }
2242
2243 return true;
2244}
2245
2246static void
2247bundle_destroy(struct ofbundle *bundle)
2248{
2249 struct ofproto_dpif *ofproto;
2250 struct ofport_dpif *port, *next_port;
2251 int i;
2252
2253 if (!bundle) {
2254 return;
2255 }
2256
2257 ofproto = bundle->ofproto;
2258 for (i = 0; i < MAX_MIRRORS; i++) {
2259 struct ofmirror *m = ofproto->mirrors[i];
2260 if (m) {
2261 if (m->out == bundle) {
2262 mirror_destroy(m);
2263 } else if (hmapx_find_and_delete(&m->srcs, bundle)
2264 || hmapx_find_and_delete(&m->dsts, bundle)) {
2cc3c58e 2265 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2266 }
2267 }
2268 }
2269
2270 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2271 bundle_del_port(port);
2272 }
2273
b44a10b7 2274 bundle_flush_macs(bundle, true);
abe529af
BP
2275 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
2276 free(bundle->name);
2277 free(bundle->trunks);
2278 lacp_destroy(bundle->lacp);
2279 bond_destroy(bundle->bond);
2280 free(bundle);
2281}
2282
2283static int
2284bundle_set(struct ofproto *ofproto_, void *aux,
2285 const struct ofproto_bundle_settings *s)
2286{
2287 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2288 bool need_flush = false;
abe529af
BP
2289 struct ofport_dpif *port;
2290 struct ofbundle *bundle;
ecac4ebf
BP
2291 unsigned long *trunks;
2292 int vlan;
abe529af
BP
2293 size_t i;
2294 bool ok;
2295
2296 if (!s) {
2297 bundle_destroy(bundle_lookup(ofproto, aux));
2298 return 0;
2299 }
2300
cb22974d
BP
2301 ovs_assert(s->n_slaves == 1 || s->bond != NULL);
2302 ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
abe529af
BP
2303
2304 bundle = bundle_lookup(ofproto, aux);
2305 if (!bundle) {
2306 bundle = xmalloc(sizeof *bundle);
2307
2308 bundle->ofproto = ofproto;
2309 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
2310 hash_pointer(aux, 0));
2311 bundle->aux = aux;
2312 bundle->name = NULL;
2313
2314 list_init(&bundle->ports);
ecac4ebf 2315 bundle->vlan_mode = PORT_VLAN_TRUNK;
abe529af
BP
2316 bundle->vlan = -1;
2317 bundle->trunks = NULL;
5e9ceccd 2318 bundle->use_priority_tags = s->use_priority_tags;
abe529af
BP
2319 bundle->lacp = NULL;
2320 bundle->bond = NULL;
2321
2322 bundle->floodable = true;
2323
2324 bundle->src_mirrors = 0;
2325 bundle->dst_mirrors = 0;
2326 bundle->mirror_out = 0;
2327 }
2328
2329 if (!bundle->name || strcmp(s->name, bundle->name)) {
2330 free(bundle->name);
2331 bundle->name = xstrdup(s->name);
2332 }
2333
2334 /* LACP. */
2335 if (s->lacp) {
2336 if (!bundle->lacp) {
2cc3c58e 2337 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2338 bundle->lacp = lacp_create();
2339 }
2340 lacp_configure(bundle->lacp, s->lacp);
2341 } else {
2342 lacp_destroy(bundle->lacp);
2343 bundle->lacp = NULL;
2344 }
2345
2346 /* Update set of ports. */
2347 ok = true;
2348 for (i = 0; i < s->n_slaves; i++) {
2349 if (!bundle_add_port(bundle, s->slaves[i],
df53d41c 2350 s->lacp ? &s->lacp_slaves[i] : NULL)) {
abe529af
BP
2351 ok = false;
2352 }
2353 }
2354 if (!ok || list_size(&bundle->ports) != s->n_slaves) {
2355 struct ofport_dpif *next_port;
2356
2357 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2358 for (i = 0; i < s->n_slaves; i++) {
56c769ab 2359 if (s->slaves[i] == port->up.ofp_port) {
abe529af
BP
2360 goto found;
2361 }
2362 }
2363
2364 bundle_del_port(port);
2365 found: ;
2366 }
2367 }
cb22974d 2368 ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
abe529af
BP
2369
2370 if (list_is_empty(&bundle->ports)) {
2371 bundle_destroy(bundle);
2372 return EINVAL;
2373 }
2374
ecac4ebf 2375 /* Set VLAN tagging mode */
5e9ceccd
BP
2376 if (s->vlan_mode != bundle->vlan_mode
2377 || s->use_priority_tags != bundle->use_priority_tags) {
ecac4ebf 2378 bundle->vlan_mode = s->vlan_mode;
5e9ceccd 2379 bundle->use_priority_tags = s->use_priority_tags;
ecac4ebf
BP
2380 need_flush = true;
2381 }
2382
abe529af 2383 /* Set VLAN tag. */
ecac4ebf
BP
2384 vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
2385 : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
2386 : 0);
2387 if (vlan != bundle->vlan) {
2388 bundle->vlan = vlan;
abe529af
BP
2389 need_flush = true;
2390 }
2391
2392 /* Get trunked VLANs. */
ecac4ebf
BP
2393 switch (s->vlan_mode) {
2394 case PORT_VLAN_ACCESS:
2395 trunks = NULL;
2396 break;
2397
2398 case PORT_VLAN_TRUNK:
ebc56baa 2399 trunks = CONST_CAST(unsigned long *, s->trunks);
ecac4ebf
BP
2400 break;
2401
2402 case PORT_VLAN_NATIVE_UNTAGGED:
2403 case PORT_VLAN_NATIVE_TAGGED:
2404 if (vlan != 0 && (!s->trunks
2405 || !bitmap_is_set(s->trunks, vlan)
2406 || bitmap_is_set(s->trunks, 0))) {
2407 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2408 if (s->trunks) {
2409 trunks = bitmap_clone(s->trunks, 4096);
2410 } else {
2411 trunks = bitmap_allocate1(4096);
2412 }
2413 bitmap_set1(trunks, vlan);
2414 bitmap_set0(trunks, 0);
2415 } else {
ebc56baa 2416 trunks = CONST_CAST(unsigned long *, s->trunks);
ecac4ebf
BP
2417 }
2418 break;
2419
2420 default:
2421 NOT_REACHED();
2422 }
abe529af
BP
2423 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
2424 free(bundle->trunks);
ecac4ebf
BP
2425 if (trunks == s->trunks) {
2426 bundle->trunks = vlan_bitmap_clone(trunks);
2427 } else {
2428 bundle->trunks = trunks;
2429 trunks = NULL;
2430 }
abe529af
BP
2431 need_flush = true;
2432 }
ecac4ebf
BP
2433 if (trunks != s->trunks) {
2434 free(trunks);
2435 }
abe529af
BP
2436
2437 /* Bonding. */
2438 if (!list_is_short(&bundle->ports)) {
2439 bundle->ofproto->has_bonded_bundles = true;
2440 if (bundle->bond) {
2441 if (bond_reconfigure(bundle->bond, s->bond)) {
2cc3c58e 2442 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2443 }
2444 } else {
2445 bundle->bond = bond_create(s->bond);
2cc3c58e 2446 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af
BP
2447 }
2448
2449 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
df53d41c 2450 bond_slave_register(bundle->bond, port, port->up.netdev);
abe529af
BP
2451 }
2452 } else {
2453 bond_destroy(bundle->bond);
2454 bundle->bond = NULL;
2455 }
2456
2457 /* If we changed something that would affect MAC learning, un-learn
2458 * everything on this port and force flow revalidation. */
2459 if (need_flush) {
b44a10b7 2460 bundle_flush_macs(bundle, false);
abe529af
BP
2461 }
2462
2463 return 0;
2464}
2465
2466static void
2467bundle_remove(struct ofport *port_)
2468{
2469 struct ofport_dpif *port = ofport_dpif_cast(port_);
2470 struct ofbundle *bundle = port->bundle;
2471
2472 if (bundle) {
2473 bundle_del_port(port);
2474 if (list_is_empty(&bundle->ports)) {
2475 bundle_destroy(bundle);
2476 } else if (list_is_short(&bundle->ports)) {
2477 bond_destroy(bundle->bond);
2478 bundle->bond = NULL;
2479 }
2480 }
2481}
2482
2483static void
5f877369 2484send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
abe529af
BP
2485{
2486 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
2487 struct ofport_dpif *port = port_;
2488 uint8_t ea[ETH_ADDR_LEN];
2489 int error;
2490
2491 error = netdev_get_etheraddr(port->up.netdev, ea);
2492 if (!error) {
abe529af 2493 struct ofpbuf packet;
5f877369 2494 void *packet_pdu;
abe529af
BP
2495
2496 ofpbuf_init(&packet, 0);
2497 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
5f877369
EJ
2498 pdu_size);
2499 memcpy(packet_pdu, pdu, pdu_size);
2500
97d6520b 2501 send_packet(port, &packet);
abe529af
BP
2502 ofpbuf_uninit(&packet);
2503 } else {
2504 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
2505 "%s (%s)", port->bundle->name,
2506 netdev_get_name(port->up.netdev), strerror(error));
2507 }
2508}
2509
2510static void
2511bundle_send_learning_packets(struct ofbundle *bundle)
2512{
2513 struct ofproto_dpif *ofproto = bundle->ofproto;
2514 int error, n_packets, n_errors;
2515 struct mac_entry *e;
2516
2517 error = n_packets = n_errors = 0;
2518 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
2519 if (e->port.p != bundle) {
ea131871
JG
2520 struct ofpbuf *learning_packet;
2521 struct ofport_dpif *port;
4dd1e3ca 2522 void *port_void;
ea131871
JG
2523 int ret;
2524
4dd1e3ca
BP
2525 /* The assignment to "port" is unnecessary but makes "grep"ing for
2526 * struct ofport_dpif more effective. */
2527 learning_packet = bond_compose_learning_packet(bundle->bond,
2528 e->mac, e->vlan,
2529 &port_void);
2530 port = port_void;
97d6520b 2531 ret = send_packet(port, learning_packet);
ea131871 2532 ofpbuf_delete(learning_packet);
abe529af
BP
2533 if (ret) {
2534 error = ret;
2535 n_errors++;
2536 }
2537 n_packets++;
2538 }
2539 }
2540
2541 if (n_errors) {
2542 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2543 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2544 "packets, last error was: %s",
2545 bundle->name, n_errors, n_packets, strerror(error));
2546 } else {
2547 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2548 bundle->name, n_packets);
2549 }
2550}
2551
2552static void
2553bundle_run(struct ofbundle *bundle)
2554{
2555 if (bundle->lacp) {
2556 lacp_run(bundle->lacp, send_pdu_cb);
2557 }
2558 if (bundle->bond) {
2559 struct ofport_dpif *port;
2560
2561 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
015e08bc 2562 bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
abe529af
BP
2563 }
2564
2cc3c58e 2565 bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
bdebeece 2566 lacp_status(bundle->lacp));
abe529af
BP
2567 if (bond_should_send_learning_packets(bundle->bond)) {
2568 bundle_send_learning_packets(bundle);
2569 }
2570 }
2571}
2572
2573static void
2574bundle_wait(struct ofbundle *bundle)
2575{
2576 if (bundle->lacp) {
2577 lacp_wait(bundle->lacp);
2578 }
2579 if (bundle->bond) {
2580 bond_wait(bundle->bond);
2581 }
2582}
2583\f
2584/* Mirrors. */
2585
2586static int
2587mirror_scan(struct ofproto_dpif *ofproto)
2588{
2589 int idx;
2590
2591 for (idx = 0; idx < MAX_MIRRORS; idx++) {
2592 if (!ofproto->mirrors[idx]) {
2593 return idx;
2594 }
2595 }
2596 return -1;
2597}
2598
2599static struct ofmirror *
2600mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
2601{
2602 int i;
2603
2604 for (i = 0; i < MAX_MIRRORS; i++) {
2605 struct ofmirror *mirror = ofproto->mirrors[i];
2606 if (mirror && mirror->aux == aux) {
2607 return mirror;
2608 }
2609 }
2610
2611 return NULL;
2612}
2613
9ba15e2a
BP
2614/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
2615static void
2616mirror_update_dups(struct ofproto_dpif *ofproto)
2617{
2618 int i;
2619
2620 for (i = 0; i < MAX_MIRRORS; i++) {
2621 struct ofmirror *m = ofproto->mirrors[i];
2622
2623 if (m) {
2624 m->dup_mirrors = MIRROR_MASK_C(1) << i;
2625 }
2626 }
2627
2628 for (i = 0; i < MAX_MIRRORS; i++) {
2629 struct ofmirror *m1 = ofproto->mirrors[i];
2630 int j;
2631
2632 if (!m1) {
2633 continue;
2634 }
2635
2636 for (j = i + 1; j < MAX_MIRRORS; j++) {
2637 struct ofmirror *m2 = ofproto->mirrors[j];
2638
edb0540b 2639 if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
9ba15e2a
BP
2640 m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
2641 m2->dup_mirrors |= m1->dup_mirrors;
2642 }
2643 }
2644 }
2645}
2646
abe529af
BP
2647static int
2648mirror_set(struct ofproto *ofproto_, void *aux,
2649 const struct ofproto_mirror_settings *s)
2650{
2651 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2652 mirror_mask_t mirror_bit;
2653 struct ofbundle *bundle;
2654 struct ofmirror *mirror;
2655 struct ofbundle *out;
2656 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
2657 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
2658 int out_vlan;
2659
2660 mirror = mirror_lookup(ofproto, aux);
2661 if (!s) {
2662 mirror_destroy(mirror);
2663 return 0;
2664 }
2665 if (!mirror) {
2666 int idx;
2667
2668 idx = mirror_scan(ofproto);
2669 if (idx < 0) {
2670 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
2671 "cannot create %s",
2672 ofproto->up.name, MAX_MIRRORS, s->name);
2673 return EFBIG;
2674 }
2675
2676 mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
2677 mirror->ofproto = ofproto;
2678 mirror->idx = idx;
8b28d864 2679 mirror->aux = aux;
abe529af
BP
2680 mirror->out_vlan = -1;
2681 mirror->name = NULL;
2682 }
2683
2684 if (!mirror->name || strcmp(s->name, mirror->name)) {
2685 free(mirror->name);
2686 mirror->name = xstrdup(s->name);
2687 }
2688
2689 /* Get the new configuration. */
2690 if (s->out_bundle) {
2691 out = bundle_lookup(ofproto, s->out_bundle);
2692 if (!out) {
2693 mirror_destroy(mirror);
2694 return EINVAL;
2695 }
2696 out_vlan = -1;
2697 } else {
2698 out = NULL;
2699 out_vlan = s->out_vlan;
2700 }
2701 bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
2702 bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);
2703
2704 /* If the configuration has not changed, do nothing. */
2705 if (hmapx_equals(&srcs, &mirror->srcs)
2706 && hmapx_equals(&dsts, &mirror->dsts)
2707 && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
2708 && mirror->out == out
2709 && mirror->out_vlan == out_vlan)
2710 {
2711 hmapx_destroy(&srcs);
2712 hmapx_destroy(&dsts);
2713 return 0;
2714 }
2715
2716 hmapx_swap(&srcs, &mirror->srcs);
2717 hmapx_destroy(&srcs);
2718
2719 hmapx_swap(&dsts, &mirror->dsts);
2720 hmapx_destroy(&dsts);
2721
2722 free(mirror->vlans);
2723 mirror->vlans = vlan_bitmap_clone(s->src_vlans);
2724
2725 mirror->out = out;
2726 mirror->out_vlan = out_vlan;
2727
2728 /* Update bundles. */
2729 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2730 HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
2731 if (hmapx_contains(&mirror->srcs, bundle)) {
2732 bundle->src_mirrors |= mirror_bit;
2733 } else {
2734 bundle->src_mirrors &= ~mirror_bit;
2735 }
2736
2737 if (hmapx_contains(&mirror->dsts, bundle)) {
2738 bundle->dst_mirrors |= mirror_bit;
2739 } else {
2740 bundle->dst_mirrors &= ~mirror_bit;
2741 }
2742
2743 if (mirror->out == bundle) {
2744 bundle->mirror_out |= mirror_bit;
2745 } else {
2746 bundle->mirror_out &= ~mirror_bit;
2747 }
2748 }
2749
2cc3c58e 2750 ofproto->backer->need_revalidate = REV_RECONFIGURE;
ccb7c863 2751 ofproto->has_mirrors = true;
2cc3c58e
EJ
2752 mac_learning_flush(ofproto->ml,
2753 &ofproto->backer->revalidate_set);
9ba15e2a 2754 mirror_update_dups(ofproto);
abe529af
BP
2755
2756 return 0;
2757}
2758
2759static void
2760mirror_destroy(struct ofmirror *mirror)
2761{
2762 struct ofproto_dpif *ofproto;
2763 mirror_mask_t mirror_bit;
2764 struct ofbundle *bundle;
ccb7c863 2765 int i;
abe529af
BP
2766
2767 if (!mirror) {
2768 return;
2769 }
2770
2771 ofproto = mirror->ofproto;
2cc3c58e
EJ
2772 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2773 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
abe529af
BP
2774
2775 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2776 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
2777 bundle->src_mirrors &= ~mirror_bit;
2778 bundle->dst_mirrors &= ~mirror_bit;
2779 bundle->mirror_out &= ~mirror_bit;
2780 }
2781
2782 hmapx_destroy(&mirror->srcs);
2783 hmapx_destroy(&mirror->dsts);
2784 free(mirror->vlans);
2785
2786 ofproto->mirrors[mirror->idx] = NULL;
2787 free(mirror->name);
2788 free(mirror);
9ba15e2a
BP
2789
2790 mirror_update_dups(ofproto);
ccb7c863
BP
2791
2792 ofproto->has_mirrors = false;
2793 for (i = 0; i < MAX_MIRRORS; i++) {
2794 if (ofproto->mirrors[i]) {
2795 ofproto->has_mirrors = true;
2796 break;
2797 }
2798 }
abe529af
BP
2799}
2800
9d24de3b
JP
2801static int
2802mirror_get_stats(struct ofproto *ofproto_, void *aux,
2803 uint64_t *packets, uint64_t *bytes)
2804{
2805 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2806 struct ofmirror *mirror = mirror_lookup(ofproto, aux);
2807
2808 if (!mirror) {
2809 *packets = *bytes = UINT64_MAX;
2810 return 0;
2811 }
2812
2813 *packets = mirror->packet_count;
2814 *bytes = mirror->byte_count;
2815
2816 return 0;
2817}
2818
abe529af
BP
2819static int
2820set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
2821{
2822 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2823 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
2cc3c58e 2824 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
abe529af
BP
2825 }
2826 return 0;
2827}
2828
2829static bool
b4affc74 2830is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
abe529af
BP
2831{
2832 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2833 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
2834 return bundle && bundle->mirror_out != 0;
2835}
8402c74b
SS
2836
2837static void
b53055f4 2838forward_bpdu_changed(struct ofproto *ofproto_)
8402c74b
SS
2839{
2840 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2cc3c58e 2841 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8402c74b 2842}
e764773c
BP
2843
2844static void
c4069512
BP
2845set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
2846 size_t max_entries)
e764773c
BP
2847{
2848 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2849 mac_learning_set_idle_time(ofproto->ml, idle_time);
c4069512 2850 mac_learning_set_max_entries(ofproto->ml, max_entries);
e764773c 2851}
abe529af
BP
2852\f
2853/* Ports. */
2854
2855static struct ofport_dpif *
4acbc98d 2856get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
abe529af 2857{
7df6a8bd
BP
2858 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
2859 return ofport ? ofport_dpif_cast(ofport) : NULL;
abe529af
BP
2860}
2861
2862static struct ofport_dpif *
4acbc98d 2863get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
abe529af 2864{
7c33b188
JR
2865 struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
2866 return port && &ofproto->up == port->up.ofproto ? port : NULL;
abe529af
BP
2867}
2868
2869static void
e1b1d06a
JP
2870ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
2871 struct ofproto_port *ofproto_port,
abe529af
BP
2872 struct dpif_port *dpif_port)
2873{
2874 ofproto_port->name = dpif_port->name;
2875 ofproto_port->type = dpif_port->type;
e1b1d06a 2876 ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
abe529af
BP
2877}
2878
0a740f48
EJ
2879static struct ofport_dpif *
2880ofport_get_peer(const struct ofport_dpif *ofport_dpif)
2881{
2882 const struct ofproto_dpif *ofproto;
2883 const char *peer;
2884
2885 peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
2886 if (!peer) {
2887 return NULL;
2888 }
2889
2890 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2891 struct ofport *ofport;
2892
2893 ofport = shash_find_data(&ofproto->up.port_by_name, peer);
2894 if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
2895 return ofport_dpif_cast(ofport);
2896 }
2897 }
2898 return NULL;
2899}
2900
0aa66d6e
EJ
2901static void
2902port_run_fast(struct ofport_dpif *ofport)
2903{
2904 if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
2905 struct ofpbuf packet;
2906
2907 ofpbuf_init(&packet, 0);
2908 cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
2909 send_packet(ofport, &packet);
2910 ofpbuf_uninit(&packet);
2911 }
2912}
2913
abe529af
BP
2914static void
2915port_run(struct ofport_dpif *ofport)
2916{
3e5b3fdb
EJ
2917 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
2918 bool carrier_changed = carrier_seq != ofport->carrier_seq;
015e08bc
EJ
2919 bool enable = netdev_get_carrier(ofport->up.netdev);
2920
3e5b3fdb
EJ
2921 ofport->carrier_seq = carrier_seq;
2922
0aa66d6e 2923 port_run_fast(ofport);
b9ad7294
EJ
2924
2925 if (ofport->tnl_port
2926 && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
2927 &ofport->tnl_port)) {
2928 ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
2929 }
2930
abe529af 2931 if (ofport->cfm) {
4653c558
EJ
2932 int cfm_opup = cfm_get_opup(ofport->cfm);
2933
abe529af 2934 cfm_run(ofport->cfm);
4653c558
EJ
2935 enable = enable && !cfm_get_fault(ofport->cfm);
2936
2937 if (cfm_opup >= 0) {
2938 enable = enable && cfm_opup;
2939 }
abe529af 2940 }
015e08bc
EJ
2941
2942 if (ofport->bundle) {
2943 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
3e5b3fdb
EJ
2944 if (carrier_changed) {
2945 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
2946 }
015e08bc
EJ
2947 }
2948
daff3353
EJ
2949 if (ofport->may_enable != enable) {
2950 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2951
2952 if (ofproto->has_bundle_action) {
2cc3c58e 2953 ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
daff3353
EJ
2954 }
2955 }
2956
015e08bc 2957 ofport->may_enable = enable;
abe529af
BP
2958}
2959
2960static void
2961port_wait(struct ofport_dpif *ofport)
2962{
2963 if (ofport->cfm) {
2964 cfm_wait(ofport->cfm);
2965 }
2966}
2967
2968static int
2969port_query_by_name(const struct ofproto *ofproto_, const char *devname,
2970 struct ofproto_port *ofproto_port)
2971{
2972 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2973 struct dpif_port dpif_port;
2974 int error;
2975
0a740f48
EJ
2976 if (sset_contains(&ofproto->ghost_ports, devname)) {
2977 const char *type = netdev_get_type_from_name(devname);
2978
2979 /* We may be called before ofproto->up.port_by_name is populated with
2980 * the appropriate ofport. For this reason, we must get the name and
2981 * type from the netdev layer directly. */
2982 if (type) {
2983 const struct ofport *ofport;
2984
2985 ofport = shash_find_data(&ofproto->up.port_by_name, devname);
2986 ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
2987 ofproto_port->name = xstrdup(devname);
2988 ofproto_port->type = xstrdup(type);
2989 return 0;
2990 }
2991 return ENODEV;
2992 }
2993
acf60855
JP
2994 if (!sset_contains(&ofproto->ports, devname)) {
2995 return ENODEV;
2996 }
2997 error = dpif_port_query_by_name(ofproto->backer->dpif,
2998 devname, &dpif_port);
abe529af 2999 if (!error) {
e1b1d06a 3000 ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
abe529af
BP
3001 }
3002 return error;
3003}
3004
3005static int
e1b1d06a 3006port_add(struct ofproto *ofproto_, struct netdev *netdev)
abe529af
BP
3007{
3008 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
b9ad7294
EJ
3009 const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
3010 const char *devname = netdev_get_name(netdev);
abe529af 3011
0a740f48
EJ
3012 if (netdev_vport_is_patch(netdev)) {
3013 sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
3014 return 0;
3015 }
3016
b9ad7294
EJ
3017 if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
3018 int error = dpif_port_add(ofproto->backer->dpif, netdev, NULL);
3019 if (error) {
3020 return error;
3021 }
acf60855 3022 }
b9ad7294
EJ
3023
3024 if (netdev_get_tunnel_config(netdev)) {
3025 sset_add(&ofproto->ghost_ports, devname);
3026 sset_add(&ofproto->backer->tnl_backers, dp_port_name);
3027 } else {
3028 sset_add(&ofproto->ports, devname);
3029 }
3030 return 0;
3031}
3032
3033/* Returns true if the odp_port backing 'ofport' may be deleted from the
3034 * datapath. In most cases, this function simply returns true. However, for
3035 * tunnels it's possible that multiple ofports use the same odp_port, in which
3036 * case we need to keep the odp_port backer around until the last ofport is
3037 * deleted. */
3038static bool
3039may_dpif_port_del(struct ofport_dpif *ofport)
3040{
3041 struct dpif_backer *backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
3042 struct ofproto_dpif *ofproto_iter;
3043
3044 if (!ofport->tnl_port) {
3045 return true;
3046 }
3047
3048 HMAP_FOR_EACH (ofproto_iter, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
3049 struct ofport_dpif *iter;
3050
3051 if (backer != ofproto_iter->backer) {
3052 continue;
3053 }
3054
3055 HMAP_FOR_EACH (iter, up.hmap_node, &ofproto_iter->up.ports) {
3056 if (ofport == iter) {
3057 continue;
3058 }
3059
3060 if (!strcmp(netdev_vport_get_dpif_port(ofport->up.netdev),
3061 netdev_vport_get_dpif_port(iter->up.netdev))) {
3062 return false;
3063 }
3064 }
3065 }
3066
3067 return true;
abe529af
BP
3068}
3069
3070static int
3071port_del(struct ofproto *ofproto_, uint16_t ofp_port)
3072{
3073 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
b9ad7294 3074 struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
e1b1d06a 3075 int error = 0;
abe529af 3076
b9ad7294
EJ
3077 if (!ofport) {
3078 return 0;
e1b1d06a 3079 }
b9ad7294
EJ
3080
3081 sset_find_and_delete(&ofproto->ghost_ports,
3082 netdev_get_name(ofport->up.netdev));
3083 if (may_dpif_port_del(ofport)) {
3084 error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
3085 if (!error) {
3086 const char *dpif_port;
3087
abe529af
BP
3088 /* The caller is going to close ofport->up.netdev. If this is a
3089 * bonded port, then the bond is using that netdev, so remove it
3090 * from the bond. The client will need to reconfigure everything
3091 * after deleting ports, so then the slave will get re-added. */
b9ad7294
EJ
3092 dpif_port = netdev_vport_get_dpif_port(ofport->up.netdev);
3093 sset_find_and_delete(&ofproto->backer->tnl_backers, dpif_port);
abe529af
BP
3094 bundle_remove(&ofport->up);
3095 }
3096 }
3097 return error;
3098}
3099
6527c598
PS
3100static int
3101port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
3102{
3103 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3104 int error;
3105
3106 error = netdev_get_stats(ofport->up.netdev, stats);
3107
ee382d89 3108 if (!error && ofport_->ofp_port == OFPP_LOCAL) {
6527c598
PS
3109 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3110
3111 /* ofproto->stats.tx_packets represents packets that we created
3112 * internally and sent to some port (e.g. packets sent with
3113 * send_packet()). Account for them as if they had come from
3114 * OFPP_LOCAL and got forwarded. */
3115
3116 if (stats->rx_packets != UINT64_MAX) {
3117 stats->rx_packets += ofproto->stats.tx_packets;
3118 }
3119
3120 if (stats->rx_bytes != UINT64_MAX) {
3121 stats->rx_bytes += ofproto->stats.tx_bytes;
3122 }
3123
3124 /* ofproto->stats.rx_packets represents packets that were received on
3125 * some port and we processed internally and dropped (e.g. STP).
4e090bc7 3126 * Account for them as if they had been forwarded to OFPP_LOCAL. */
6527c598
PS
3127
3128 if (stats->tx_packets != UINT64_MAX) {
3129 stats->tx_packets += ofproto->stats.rx_packets;
3130 }
3131
3132 if (stats->tx_bytes != UINT64_MAX) {
3133 stats->tx_bytes += ofproto->stats.rx_bytes;
3134 }
3135 }
3136
3137 return error;
3138}
3139
3140/* Account packets for LOCAL port. */
3141static void
3142ofproto_update_local_port_stats(const struct ofproto *ofproto_,
3143 size_t tx_size, size_t rx_size)
3144{
3145 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3146
3147 if (rx_size) {
3148 ofproto->stats.rx_packets++;
3149 ofproto->stats.rx_bytes += rx_size;
3150 }
3151 if (tx_size) {
3152 ofproto->stats.tx_packets++;
3153 ofproto->stats.tx_bytes += tx_size;
3154 }
3155}
3156
abe529af 3157struct port_dump_state {
acf60855
JP
3158 uint32_t bucket;
3159 uint32_t offset;
0a740f48 3160 bool ghost;
da78d43d
BP
3161
3162 struct ofproto_port port;
3163 bool has_port;
abe529af
BP
3164};
3165
3166static int
acf60855 3167port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
abe529af 3168{
0a740f48 3169 *statep = xzalloc(sizeof(struct port_dump_state));
abe529af
BP
3170 return 0;
3171}
3172
3173static int
b9ad7294 3174port_dump_next(const struct ofproto *ofproto_, void *state_,
abe529af
BP
3175 struct ofproto_port *port)
3176{
e1b1d06a 3177 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
abe529af 3178 struct port_dump_state *state = state_;
0a740f48 3179 const struct sset *sset;
acf60855 3180 struct sset_node *node;
abe529af 3181
da78d43d
BP
3182 if (state->has_port) {
3183 ofproto_port_destroy(&state->port);
3184 state->has_port = false;
3185 }
0a740f48
EJ
3186 sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
3187 while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
acf60855
JP
3188 int error;
3189
da78d43d
BP
3190 error = port_query_by_name(ofproto_, node->name, &state->port);
3191 if (!error) {
3192 *port = state->port;
3193 state->has_port = true;
3194 return 0;
3195 } else if (error != ENODEV) {
acf60855
JP
3196 return error;
3197 }
abe529af 3198 }
acf60855 3199
0a740f48
EJ
3200 if (!state->ghost) {
3201 state->ghost = true;
3202 state->bucket = 0;
3203 state->offset = 0;
3204 return port_dump_next(ofproto_, state_, port);
3205 }
3206
acf60855 3207 return EOF;
abe529af
BP
3208}
3209
3210static int
3211port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
3212{
3213 struct port_dump_state *state = state_;
3214
da78d43d
BP
3215 if (state->has_port) {
3216 ofproto_port_destroy(&state->port);
3217 }
abe529af
BP
3218 free(state);
3219 return 0;
3220}
3221
3222static int
3223port_poll(const struct ofproto *ofproto_, char **devnamep)
3224{
3225 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855
JP
3226
3227 if (ofproto->port_poll_errno) {
3228 int error = ofproto->port_poll_errno;
3229 ofproto->port_poll_errno = 0;
3230 return error;
3231 }
3232
3233 if (sset_is_empty(&ofproto->port_poll_set)) {
3234 return EAGAIN;
3235 }
3236
3237 *devnamep = sset_pop(&ofproto->port_poll_set);
3238 return 0;
abe529af
BP
3239}
3240
3241static void
3242port_poll_wait(const struct ofproto *ofproto_)
3243{
3244 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855 3245 dpif_port_poll_wait(ofproto->backer->dpif);
abe529af
BP
3246}
3247
3248static int
3249port_is_lacp_current(const struct ofport *ofport_)
3250{
3251 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3252 return (ofport->bundle && ofport->bundle->lacp
3253 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
3254 : -1);
3255}
3256\f
3257/* Upcall handling. */
3258
501f8d1f
BP
3259/* Flow miss batching.
3260 *
3261 * Some dpifs implement operations faster when you hand them off in a batch.
3262 * To allow batching, "struct flow_miss" queues the dpif-related work needed
3263 * for a given flow. Each "struct flow_miss" corresponds to sending one or
3264 * more packets, plus possibly installing the flow in the dpif.
3265 *
3266 * So far we only batch the operations that affect flow setup time the most.
3267 * It's possible to batch more than that, but the benefit might be minimal. */
3268struct flow_miss {
3269 struct hmap_node hmap_node;
acf60855 3270 struct ofproto_dpif *ofproto;
501f8d1f 3271 struct flow flow;
b0f7b9b5 3272 enum odp_key_fitness key_fitness;
501f8d1f
BP
3273 const struct nlattr *key;
3274 size_t key_len;
e84173dc 3275 ovs_be16 initial_tci;
501f8d1f 3276 struct list packets;
6a7e895f 3277 enum dpif_upcall_type upcall_type;
a088a1ff 3278 uint32_t odp_in_port;
501f8d1f
BP
3279};
3280
3281struct flow_miss_op {
c2b565b5 3282 struct dpif_op dpif_op;
5fe20d5d
BP
3283 struct subfacet *subfacet; /* Subfacet */
3284 void *garbage; /* Pointer to pass to free(), NULL if none. */
3285 uint64_t stub[1024 / 8]; /* Temporary buffer. */
501f8d1f
BP
3286};
3287
62cd7072
BP
3288/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
3289 * OpenFlow controller as necessary according to their individual
29ebe880 3290 * configurations. */
62cd7072 3291static void
a39edbd4 3292send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
29ebe880 3293 const struct flow *flow)
62cd7072
BP
3294{
3295 struct ofputil_packet_in pin;
3296
3e3252fa
EJ
3297 pin.packet = packet->data;
3298 pin.packet_len = packet->size;
62cd7072 3299 pin.reason = OFPR_NO_MATCH;
a7349929 3300 pin.controller_id = 0;
54834960
EJ
3301
3302 pin.table_id = 0;
3303 pin.cookie = 0;
3304
62cd7072 3305 pin.send_len = 0; /* not used for flow table misses */
5d6c3af0
EJ
3306
3307 flow_get_metadata(flow, &pin.fmd);
3308
d8653c38 3309 connmgr_send_packet_in(ofproto->up.connmgr, &pin);
62cd7072
BP
3310}
3311
6a7e895f 3312static enum slow_path_reason
abe529af
BP
3313process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
3314 const struct ofpbuf *packet)
3315{
b6e001b6
EJ
3316 struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
3317
3318 if (!ofport) {
6a7e895f 3319 return 0;
b6e001b6
EJ
3320 }
3321
ef9819b5 3322 if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
b6e001b6 3323 if (packet) {
abe529af
BP
3324 cfm_process_heartbeat(ofport->cfm, packet);
3325 }
6a7e895f 3326 return SLOW_CFM;
b6e001b6
EJ
3327 } else if (ofport->bundle && ofport->bundle->lacp
3328 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3329 if (packet) {
3330 lacp_process_packet(ofport->bundle->lacp, ofport, packet);
abe529af 3331 }
6a7e895f 3332 return SLOW_LACP;
21f7563c
JP
3333 } else if (ofproto->stp && stp_should_process_flow(flow)) {
3334 if (packet) {
3335 stp_process_packet(ofport, packet);
3336 }
6a7e895f 3337 return SLOW_STP;
abe529af 3338 }
6a7e895f 3339 return 0;
abe529af
BP
3340}
3341
501f8d1f 3342static struct flow_miss *
b23cdad9 3343flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
abe529af 3344{
501f8d1f 3345 struct flow_miss *miss;
abe529af 3346
501f8d1f
BP
3347 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
3348 if (flow_equal(&miss->flow, flow)) {
3349 return miss;
3350 }
3351 }
abe529af 3352
b23cdad9 3353 return NULL;
501f8d1f 3354}
abe529af 3355
9d6ac44e
BP
3356/* Partially Initializes 'op' as an "execute" operation for 'miss' and
3357 * 'packet'. The caller must initialize op->actions and op->actions_len. If
3358 * 'miss' is associated with a subfacet the caller must also initialize the
3359 * returned op->subfacet, and if anything needs to be freed after processing
3360 * the op, the caller must initialize op->garbage also. */
501f8d1f 3361static void
9d6ac44e
BP
3362init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
3363 struct flow_miss_op *op)
501f8d1f 3364{
9d6ac44e
BP
3365 if (miss->flow.vlan_tci != miss->initial_tci) {
3366 /* This packet was received on a VLAN splinter port. We
3367 * added a VLAN to the packet to make the packet resemble
3368 * the flow, but the actions were composed assuming that
3369 * the packet contained no VLAN. So, we must remove the
3370 * VLAN header from the packet before trying to execute the
3371 * actions. */
3372 eth_pop_vlan(packet);
3373 }
3374
3375 op->subfacet = NULL;
3376 op->garbage = NULL;
3377 op->dpif_op.type = DPIF_OP_EXECUTE;
3378 op->dpif_op.u.execute.key = miss->key;
3379 op->dpif_op.u.execute.key_len = miss->key_len;
3380 op->dpif_op.u.execute.packet = packet;
3381}
3382
3383/* Helper for handle_flow_miss_without_facet() and
3384 * handle_flow_miss_with_facet(). */
3385static void
3386handle_flow_miss_common(struct rule_dpif *rule,
3387 struct ofpbuf *packet, const struct flow *flow)
3388{
3389 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3390
3391 ofproto->n_matches++;
3392
3393 if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
3394 /*
3395 * Extra-special case for fail-open mode.
3396 *
3397 * We are in fail-open mode and the packet matched the fail-open
3398 * rule, but we are connected to a controller too. We should send
3399 * the packet up to the controller in the hope that it will try to
3400 * set up a flow and thereby allow us to exit fail-open.
3401 *
3402 * See the top-level comment in fail-open.c for more information.
3403 */
3404 send_packet_in_miss(ofproto, packet, flow);
3405 }
3406}
3407
3408/* Figures out whether a flow that missed in 'ofproto', whose details are in
3409 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
3410 * installing a datapath flow. The answer is usually "yes" (a return value of
3411 * true). However, for short flows the cost of bookkeeping is much higher than
3412 * the benefits, so when the datapath holds a large number of flows we impose
3413 * some heuristics to decide which flows are likely to be worth tracking. */
3414static bool
3415flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
3416 struct flow_miss *miss, uint32_t hash)
3417{
3418 if (!ofproto->governor) {
3419 size_t n_subfacets;
3420
3421 n_subfacets = hmap_count(&ofproto->subfacets);
3422 if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
3423 return true;
3424 }
3425
3426 ofproto->governor = governor_create(ofproto->up.name);
3427 }
3428
3429 return governor_should_install_flow(ofproto->governor, hash,
3430 list_size(&miss->packets));
3431}
3432
3433/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
3434 * or creating any datapath flow. May add an "execute" operation to 'ops' and
3435 * increment '*n_ops'. */
3436static void
3437handle_flow_miss_without_facet(struct flow_miss *miss,
3438 struct rule_dpif *rule,
3439 struct flow_miss_op *ops, size_t *n_ops)
3440{
3441 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
a7752d4a 3442 long long int now = time_msec();
9d6ac44e 3443 struct action_xlate_ctx ctx;
530a1d91 3444 struct ofpbuf *packet;
2b459b83 3445
9d6ac44e
BP
3446 LIST_FOR_EACH (packet, list_node, &miss->packets) {
3447 struct flow_miss_op *op = &ops[*n_ops];
3448 struct dpif_flow_stats stats;
3449 struct ofpbuf odp_actions;
abe529af 3450
9d6ac44e 3451 COVERAGE_INC(facet_suppress);
501f8d1f 3452
9d6ac44e 3453 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
501f8d1f 3454
a7752d4a 3455 dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
9d6ac44e 3456 rule_credit_stats(rule, &stats);
abe529af 3457
9d6ac44e
BP
3458 action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
3459 rule, 0, packet);
3460 ctx.resubmit_stats = &stats;
f25d0cf3 3461 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
9d6ac44e 3462 &odp_actions);
abe529af 3463
9d6ac44e
BP
3464 if (odp_actions.size) {
3465 struct dpif_execute *execute = &op->dpif_op.u.execute;
3466
3467 init_flow_miss_execute_op(miss, packet, op);
3468 execute->actions = odp_actions.data;
3469 execute->actions_len = odp_actions.size;
3470 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3471
3472 (*n_ops)++;
3473 } else {
3474 ofpbuf_uninit(&odp_actions);
3475 }
abe529af 3476 }
9d6ac44e
BP
3477}
3478
3479/* Handles 'miss', which matches 'facet'. May add any required datapath
459b16a1
BP
3480 * operations to 'ops', incrementing '*n_ops' for each new op.
3481 *
3482 * All of the packets in 'miss' are considered to have arrived at time 'now'.
3483 * This is really important only for new facets: if we just called time_msec()
3484 * here, then the new subfacet or its packets could look (occasionally) as
3485 * though it was used some time after the facet was used. That can make a
3486 * one-packet flow look like it has a nonzero duration, which looks odd in
3487 * e.g. NetFlow statistics. */
9d6ac44e
BP
3488static void
3489handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
459b16a1 3490 long long int now,
9d6ac44e
BP
3491 struct flow_miss_op *ops, size_t *n_ops)
3492{
6a7e895f
BP
3493 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3494 enum subfacet_path want_path;
9d6ac44e
BP
3495 struct subfacet *subfacet;
3496 struct ofpbuf *packet;
abe529af 3497
a088a1ff 3498 subfacet = subfacet_create(facet, miss, now);
b0f7b9b5 3499
530a1d91 3500 LIST_FOR_EACH (packet, list_node, &miss->packets) {
5fe20d5d 3501 struct flow_miss_op *op = &ops[*n_ops];
67d91f78 3502 struct dpif_flow_stats stats;
5fe20d5d 3503 struct ofpbuf odp_actions;
67d91f78 3504
9d6ac44e 3505 handle_flow_miss_common(facet->rule, packet, &miss->flow);
501f8d1f 3506
5fe20d5d 3507 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
6a7e895f 3508 if (!subfacet->actions || subfacet->slow) {
5fe20d5d 3509 subfacet_make_actions(subfacet, packet, &odp_actions);
501f8d1f 3510 }
67d91f78 3511
459b16a1 3512 dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
15baa734 3513 subfacet_update_stats(subfacet, &stats);
67d91f78 3514
9d6ac44e
BP
3515 if (subfacet->actions_len) {
3516 struct dpif_execute *execute = &op->dpif_op.u.execute;
8338659a 3517
9d6ac44e
BP
3518 init_flow_miss_execute_op(miss, packet, op);
3519 op->subfacet = subfacet;
6a7e895f 3520 if (!subfacet->slow) {
9d6ac44e
BP
3521 execute->actions = subfacet->actions;
3522 execute->actions_len = subfacet->actions_len;
3523 ofpbuf_uninit(&odp_actions);
3524 } else {
3525 execute->actions = odp_actions.data;
3526 execute->actions_len = odp_actions.size;
3527 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3528 }
999fba59 3529
9d6ac44e 3530 (*n_ops)++;
5fe20d5d 3531 } else {
9d6ac44e 3532 ofpbuf_uninit(&odp_actions);
5fe20d5d 3533 }
501f8d1f
BP
3534 }
3535
6a7e895f
BP
3536 want_path = subfacet_want_path(subfacet->slow);
3537 if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
501f8d1f 3538 struct flow_miss_op *op = &ops[(*n_ops)++];
c2b565b5 3539 struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
501f8d1f 3540
b0f7b9b5 3541 op->subfacet = subfacet;
5fe20d5d 3542 op->garbage = NULL;
c2b565b5 3543 op->dpif_op.type = DPIF_OP_FLOW_PUT;
501f8d1f
BP
3544 put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
3545 put->key = miss->key;
3546 put->key_len = miss->key_len;
6a7e895f
BP
3547 if (want_path == SF_FAST_PATH) {
3548 put->actions = subfacet->actions;
3549 put->actions_len = subfacet->actions_len;
3550 } else {
3551 compose_slow_path(ofproto, &facet->flow, subfacet->slow,
3552 op->stub, sizeof op->stub,
3553 &put->actions, &put->actions_len);
3554 }
501f8d1f
BP
3555 put->stats = NULL;
3556 }
3557}
3558
acf60855
JP
3559/* Handles flow miss 'miss'. May add any required datapath operations
3560 * to 'ops', incrementing '*n_ops' for each new op. */
9d6ac44e 3561static void
acf60855
JP
3562handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
3563 size_t *n_ops)
9d6ac44e 3564{
acf60855 3565 struct ofproto_dpif *ofproto = miss->ofproto;
9d6ac44e 3566 struct facet *facet;
459b16a1 3567 long long int now;
9d6ac44e
BP
3568 uint32_t hash;
3569
3570 /* The caller must ensure that miss->hmap_node.hash contains
3571 * flow_hash(miss->flow, 0). */
3572 hash = miss->hmap_node.hash;
3573
3574 facet = facet_lookup_valid(ofproto, &miss->flow, hash);
3575 if (!facet) {
c57b2226
BP
3576 struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);
3577
3578 if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
9d6ac44e
BP
3579 handle_flow_miss_without_facet(miss, rule, ops, n_ops);
3580 return;
3581 }
3582
3583 facet = facet_create(rule, &miss->flow, hash);
459b16a1
BP
3584 now = facet->used;
3585 } else {
3586 now = time_msec();
9d6ac44e 3587 }
459b16a1 3588 handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
9d6ac44e
BP
3589}
3590
8f73d537
EJ
3591static struct drop_key *
3592drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
3593 size_t key_len)
3594{
3595 struct drop_key *drop_key;
3596
3597 HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
3598 &backer->drop_keys) {
3599 if (drop_key->key_len == key_len
3600 && !memcmp(drop_key->key, key, key_len)) {
3601 return drop_key;
3602 }
3603 }
3604 return NULL;
3605}
3606
3607static void
3608drop_key_clear(struct dpif_backer *backer)
3609{
3610 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3611 struct drop_key *drop_key, *next;
3612
3613 HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
3614 int error;
3615
3616 error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
3617 NULL);
3618 if (error && !VLOG_DROP_WARN(&rl)) {
3619 struct ds ds = DS_EMPTY_INITIALIZER;
3620 odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
3621 VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
3622 ds_cstr(&ds));
3623 ds_destroy(&ds);
3624 }
3625
3626 hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
3627 free(drop_key->key);
3628 free(drop_key);
3629 }
3630}
3631
e09ee259
EJ
3632/* Given a datpath, packet, and flow metadata ('backer', 'packet', and 'key'
3633 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
3634 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
3635 * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
3636 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
3637 * 'packet' ingressed.
e2a6ca36 3638 *
e09ee259
EJ
3639 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
3640 * 'flow''s in_port to OFPP_NONE.
3641 *
3642 * This function does post-processing on data returned from
3643 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
3644 * of the upcall processing logic. In particular, if the extracted in_port is
3645 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
3646 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
3647 * a VLAN header onto 'packet' (if it is nonnull).
3648 *
3649 * Optionally, if nonnull, sets '*initial_tci' to the VLAN TCI with which the
3650 * packet was really received, that is, the actual VLAN TCI extracted by
3651 * odp_flow_key_to_flow(). (This differs from the value returned in
3652 * flow->vlan_tci only for packets received on VLAN splinters.)
3653 *
b9ad7294
EJ
3654 * Similarly, this function also includes some logic to help with tunnels. It
3655 * may modify 'flow' as necessary to make the tunneling implementation
3656 * transparent to the upcall processing logic.
3657 *
e09ee259
EJ
3658 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
3659 * or some other positive errno if there are other problems. */
3660static int
3661ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
3662 const struct nlattr *key, size_t key_len,
3663 struct flow *flow, enum odp_key_fitness *fitnessp,
3664 struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
3665 ovs_be16 *initial_tci)
e84173dc 3666{
e09ee259
EJ
3667 const struct ofport_dpif *port;
3668 enum odp_key_fitness fitness;
b9ad7294 3669 int error = ENODEV;
e09ee259
EJ
3670
3671 fitness = odp_flow_key_to_flow(key, key_len, flow);
e84173dc 3672 if (fitness == ODP_FIT_ERROR) {
e09ee259
EJ
3673 error = EINVAL;
3674 goto exit;
3675 }
3676
3677 if (initial_tci) {
3678 *initial_tci = flow->vlan_tci;
e84173dc 3679 }
e84173dc 3680
e09ee259
EJ
3681 if (odp_in_port) {
3682 *odp_in_port = flow->in_port;
3683 }
3684
b9ad7294
EJ
3685 if (tnl_port_should_receive(flow)) {
3686 const struct ofport *ofport = tnl_port_receive(flow);
3687 if (!ofport) {
3688 flow->in_port = OFPP_NONE;
3689 goto exit;
3690 }
3691 port = ofport_dpif_cast(ofport);
e09ee259 3692
b9ad7294
EJ
3693 /* We can't reproduce 'key' from 'flow'. */
3694 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
e09ee259 3695
b9ad7294
EJ
3696 /* XXX: Since the tunnel module is not scoped per backer, it's
3697 * theoretically possible that we'll receive an ofport belonging to an
3698 * entirely different datapath. In practice, this can't happen because
3699 * no platforms has two separate datapaths which each support
3700 * tunneling. */
3701 ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
3702 } else {
3703 port = odp_port_to_ofport(backer, flow->in_port);
3704 if (!port) {
3705 flow->in_port = OFPP_NONE;
3706 goto exit;
3707 }
3708
3709 flow->in_port = port->up.ofp_port;
3710 if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
3711 if (packet) {
3712 /* Make the packet resemble the flow, so that it gets sent to
3713 * an OpenFlow controller properly, so that it looks correct
3714 * for sFlow, and so that flow_extract() will get the correct
3715 * vlan_tci if it is called on 'packet'.
3716 *
3717 * The allocated space inside 'packet' probably also contains
3718 * 'key', that is, both 'packet' and 'key' are probably part of
3719 * a struct dpif_upcall (see the large comment on that
3720 * structure definition), so pushing data on 'packet' is in
3721 * general not a good idea since it could overwrite 'key' or
3722 * free it as a side effect. However, it's OK in this special
3723 * case because we know that 'packet' is inside a Netlink
3724 * attribute: pushing 4 bytes will just overwrite the 4-byte
3725 * "struct nlattr", which is fine since we don't need that
3726 * header anymore. */
3727 eth_push_vlan(packet, flow->vlan_tci);
3728 }
3729 /* We can't reproduce 'key' from 'flow'. */
3730 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
52a90c29
BP
3731 }
3732 }
e09ee259 3733 error = 0;
52a90c29 3734
b9ad7294
EJ
3735 if (ofproto) {
3736 *ofproto = ofproto_dpif_cast(port->up.ofproto);
3737 }
3738
e09ee259
EJ
3739exit:
3740 if (fitnessp) {
3741 *fitnessp = fitness;
3742 }
3743 return error;
e84173dc
BP
3744}
3745
501f8d1f 3746static void
acf60855 3747handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
501f8d1f
BP
3748 size_t n_upcalls)
3749{
3750 struct dpif_upcall *upcall;
b23cdad9
BP
3751 struct flow_miss *miss;
3752 struct flow_miss misses[FLOW_MISS_MAX_BATCH];
501f8d1f 3753 struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
c2b565b5 3754 struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
501f8d1f 3755 struct hmap todo;
b23cdad9 3756 int n_misses;
501f8d1f
BP
3757 size_t n_ops;
3758 size_t i;
3759
3760 if (!n_upcalls) {
3761 return;
3762 }
3763
3764 /* Construct the to-do list.
3765 *
3766 * This just amounts to extracting the flow from each packet and sticking
3767 * the packets that have the same flow in the same "flow_miss" structure so
3768 * that we can process them together. */
3769 hmap_init(&todo);
b23cdad9 3770 n_misses = 0;
501f8d1f 3771 for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
b23cdad9
BP
3772 struct flow_miss *miss = &misses[n_misses];
3773 struct flow_miss *existing_miss;
acf60855 3774 struct ofproto_dpif *ofproto;
a088a1ff 3775 uint32_t odp_in_port;
1d446463 3776 struct flow flow;
b23cdad9 3777 uint32_t hash;
e09ee259 3778 int error;
501f8d1f 3779
e09ee259
EJ
3780 error = ofproto_receive(backer, upcall->packet, upcall->key,
3781 upcall->key_len, &flow, &miss->key_fitness,
3782 &ofproto, &odp_in_port, &miss->initial_tci);
3783 if (error == ENODEV) {
8f73d537
EJ
3784 struct drop_key *drop_key;
3785
acf60855
JP
3786 /* Received packet on port for which we couldn't associate
3787 * an ofproto. This can happen if a port is removed while
3788 * traffic is being received. Print a rate-limited message
8f73d537
EJ
3789 * in case it happens frequently. Install a drop flow so
3790 * that future packets of the flow are inexpensively dropped
3791 * in the kernel. */
acf60855
JP
3792 VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
3793 flow.in_port);
8f73d537
EJ
3794
3795 drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
3796 if (!drop_key) {
3797 drop_key = xmalloc(sizeof *drop_key);
3798 drop_key->key = xmemdup(upcall->key, upcall->key_len);
3799 drop_key->key_len = upcall->key_len;
3800
3801 hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
3802 hash_bytes(drop_key->key, drop_key->key_len, 0));
3803 dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
3804 drop_key->key, drop_key->key_len, NULL, 0, NULL);
3805 }
3806 continue;
acf60855 3807 }
e09ee259 3808 if (error) {
b0f7b9b5
BP
3809 continue;
3810 }
72e8bf28 3811 flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
1d446463 3812 &flow.tunnel, flow.in_port, &miss->flow);
501f8d1f 3813
501f8d1f 3814 /* Add other packets to a to-do list. */
b23cdad9
BP
3815 hash = flow_hash(&miss->flow, 0);
3816 existing_miss = flow_miss_find(&todo, &miss->flow, hash);
3817 if (!existing_miss) {
3818 hmap_insert(&todo, &miss->hmap_node, hash);
acf60855 3819 miss->ofproto = ofproto;
b23cdad9
BP
3820 miss->key = upcall->key;
3821 miss->key_len = upcall->key_len;
6a7e895f 3822 miss->upcall_type = upcall->type;
a088a1ff 3823 miss->odp_in_port = odp_in_port;
b23cdad9
BP
3824 list_init(&miss->packets);
3825
3826 n_misses++;
3827 } else {
3828 miss = existing_miss;
3829 }
501f8d1f
BP
3830 list_push_back(&miss->packets, &upcall->packet->list_node);
3831 }
3832
3833 /* Process each element in the to-do list, constructing the set of
3834 * operations to batch. */
3835 n_ops = 0;
33bb0caa 3836 HMAP_FOR_EACH (miss, hmap_node, &todo) {
acf60855 3837 handle_flow_miss(miss, flow_miss_ops, &n_ops);
abe529af 3838 }
cb22974d 3839 ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
501f8d1f
BP
3840
3841 /* Execute batch. */
3842 for (i = 0; i < n_ops; i++) {
3843 dpif_ops[i] = &flow_miss_ops[i].dpif_op;
3844 }
acf60855 3845 dpif_operate(backer->dpif, dpif_ops, n_ops);
501f8d1f
BP
3846
3847 /* Free memory and update facets. */
3848 for (i = 0; i < n_ops; i++) {
3849 struct flow_miss_op *op = &flow_miss_ops[i];
501f8d1f
BP
3850
3851 switch (op->dpif_op.type) {
3852 case DPIF_OP_EXECUTE:
501f8d1f 3853 break;
abe529af 3854
501f8d1f 3855 case DPIF_OP_FLOW_PUT:
c2b565b5 3856 if (!op->dpif_op.error) {
6a7e895f 3857 op->subfacet->path = subfacet_want_path(op->subfacet->slow);
501f8d1f
BP
3858 }
3859 break;
b99d3cee
BP
3860
3861 case DPIF_OP_FLOW_DEL:
3862 NOT_REACHED();
501f8d1f 3863 }
5fe20d5d
BP
3864
3865 free(op->garbage);
501f8d1f 3866 }
33bb0caa 3867 hmap_destroy(&todo);
abe529af
BP
3868}
3869
6a7e895f
BP
3870static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
3871classify_upcall(const struct dpif_upcall *upcall)
3872{
3873 union user_action_cookie cookie;
3874
3875 /* First look at the upcall type. */
3876 switch (upcall->type) {
3877 case DPIF_UC_ACTION:
3878 break;
3879
3880 case DPIF_UC_MISS:
3881 return MISS_UPCALL;
3882
3883 case DPIF_N_UC_TYPES:
3884 default:
3885 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
3886 return BAD_UPCALL;
3887 }
3888
3889 /* "action" upcalls need a closer look. */
3890 memcpy(&cookie, &upcall->userdata, sizeof(cookie));
3891 switch (cookie.type) {
3892 case USER_ACTION_COOKIE_SFLOW:
3893 return SFLOW_UPCALL;
3894
3895 case USER_ACTION_COOKIE_SLOW_PATH:
3896 return MISS_UPCALL;
3897
3898 case USER_ACTION_COOKIE_UNSPEC:
3899 default:
3900 VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
3901 return BAD_UPCALL;
3902 }
3903}
3904
abe529af 3905static void
acf60855 3906handle_sflow_upcall(struct dpif_backer *backer,
6a7e895f 3907 const struct dpif_upcall *upcall)
abe529af 3908{
acf60855 3909 struct ofproto_dpif *ofproto;
1673e0e4 3910 union user_action_cookie cookie;
e84173dc 3911 struct flow flow;
e1b1d06a 3912 uint32_t odp_in_port;
abe529af 3913
e09ee259
EJ
3914 if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
3915 &flow, NULL, &ofproto, &odp_in_port, NULL)
3916 || !ofproto->sflow) {
e84173dc
BP
3917 return;
3918 }
3919
6a7e895f 3920 memcpy(&cookie, &upcall->userdata, sizeof(cookie));
e1b1d06a
JP
3921 dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
3922 odp_in_port, &cookie);
6ff686f2
PS
3923}
3924
9b16c439 3925static int
acf60855 3926handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
6ff686f2 3927{
9b16c439 3928 struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
90a7c55e
BP
3929 struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
3930 uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
3931 int n_processed;
9b16c439
BP
3932 int n_misses;
3933 int i;
abe529af 3934
cb22974d 3935 ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);
abe529af 3936
9b16c439 3937 n_misses = 0;
90a7c55e 3938 for (n_processed = 0; n_processed < max_batch; n_processed++) {
9b16c439 3939 struct dpif_upcall *upcall = &misses[n_misses];
90a7c55e 3940 struct ofpbuf *buf = &miss_bufs[n_misses];
9b16c439
BP
3941 int error;
3942
90a7c55e
BP
3943 ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
3944 sizeof miss_buf_stubs[n_misses]);
acf60855 3945 error = dpif_recv(backer->dpif, upcall, buf);
9b16c439 3946 if (error) {
90a7c55e 3947 ofpbuf_uninit(buf);
9b16c439
BP
3948 break;
3949 }
3950
6a7e895f
BP
3951 switch (classify_upcall(upcall)) {
3952 case MISS_UPCALL:
9b16c439
BP
3953 /* Handle it later. */
3954 n_misses++;
3955 break;
3956
6a7e895f 3957 case SFLOW_UPCALL:
acf60855 3958 handle_sflow_upcall(backer, upcall);
6a7e895f
BP
3959 ofpbuf_uninit(buf);
3960 break;
3961
3962 case BAD_UPCALL:
3963 ofpbuf_uninit(buf);
9b16c439
BP
3964 break;
3965 }
abe529af 3966 }
9b16c439 3967
6a7e895f 3968 /* Handle deferred MISS_UPCALL processing. */
acf60855 3969 handle_miss_upcalls(backer, misses, n_misses);
90a7c55e
BP
3970 for (i = 0; i < n_misses; i++) {
3971 ofpbuf_uninit(&miss_bufs[i]);
3972 }
9b16c439 3973
90a7c55e 3974 return n_processed;
abe529af
BP
3975}
3976\f
3977/* Flow expiration. */
3978
b0f7b9b5 3979static int subfacet_max_idle(const struct ofproto_dpif *);
acf60855 3980static void update_stats(struct dpif_backer *);
abe529af 3981static void rule_expire(struct rule_dpif *);
b0f7b9b5 3982static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
abe529af
BP
3983
3984/* This function is called periodically by run(). Its job is to collect
3985 * updates for the flows that have been installed into the datapath, most
3986 * importantly when they last were used, and then use that information to
3987 * expire flows that have not been used recently.
3988 *
3989 * Returns the number of milliseconds after which it should be called again. */
3990static int
acf60855 3991expire(struct dpif_backer *backer)
abe529af 3992{
acf60855
JP
3993 struct ofproto_dpif *ofproto;
3994 int max_idle = INT32_MAX;
abe529af 3995
8f73d537
EJ
3996 /* Periodically clear out the drop keys in an effort to keep them
3997 * relatively few. */
3998 drop_key_clear(backer);
3999
acf60855
JP
4000 /* Update stats for each flow in the backer. */
4001 update_stats(backer);
abe529af 4002
acf60855 4003 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
e503cc19 4004 struct rule *rule, *next_rule;
acf60855 4005 int dp_max_idle;
abe529af 4006
acf60855
JP
4007 if (ofproto->backer != backer) {
4008 continue;
4009 }
0697b5c3 4010
acf60855
JP
4011 /* Expire subfacets that have been idle too long. */
4012 dp_max_idle = subfacet_max_idle(ofproto);
4013 expire_subfacets(ofproto, dp_max_idle);
4014
4015 max_idle = MIN(max_idle, dp_max_idle);
4016
4017 /* Expire OpenFlow flows whose idle_timeout or hard_timeout
4018 * has passed. */
e503cc19
SH
4019 LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
4020 &ofproto->up.expirable) {
4021 rule_expire(rule_dpif_cast(rule));
0697b5c3 4022 }
abe529af 4023
acf60855
JP
4024 /* All outstanding data in existing flows has been accounted, so it's a
4025 * good time to do bond rebalancing. */
4026 if (ofproto->has_bonded_bundles) {
4027 struct ofbundle *bundle;
abe529af 4028
acf60855
JP
4029 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
4030 if (bundle->bond) {
2cc3c58e 4031 bond_rebalance(bundle->bond, &backer->revalidate_set);
acf60855 4032 }
abe529af
BP
4033 }
4034 }
4035 }
4036
acf60855 4037 return MIN(max_idle, 1000);
abe529af
BP
4038}
4039
a218c879
BP
4040/* Updates flow table statistics given that the datapath just reported 'stats'
4041 * as 'subfacet''s statistics. */
4042static void
4043update_subfacet_stats(struct subfacet *subfacet,
4044 const struct dpif_flow_stats *stats)
4045{
4046 struct facet *facet = subfacet->facet;
4047
4048 if (stats->n_packets >= subfacet->dp_packet_count) {
4049 uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
4050 facet->packet_count += extra;
4051 } else {
4052 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
4053 }
4054
4055 if (stats->n_bytes >= subfacet->dp_byte_count) {
4056 facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
4057 } else {
4058 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
4059 }
4060
4061 subfacet->dp_packet_count = stats->n_packets;
4062 subfacet->dp_byte_count = stats->n_bytes;
4063
4064 facet->tcp_flags |= stats->tcp_flags;
4065
4066 subfacet_update_time(subfacet, stats->used);
4067 if (facet->accounted_bytes < facet->byte_count) {
4068 facet_learn(facet);
4069 facet_account(facet);
4070 facet->accounted_bytes = facet->byte_count;
4071 }
4072 facet_push_stats(facet);
4073}
4074
4075/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
4076 * about, or a flow that shouldn't be installed but was anyway. Delete it. */
4077static void
acf60855 4078delete_unexpected_flow(struct ofproto_dpif *ofproto,
a218c879
BP
4079 const struct nlattr *key, size_t key_len)
4080{
4081 if (!VLOG_DROP_WARN(&rl)) {
4082 struct ds s;
4083
4084 ds_init(&s);
4085 odp_flow_key_format(key, key_len, &s);
acf60855 4086 VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
a218c879
BP
4087 ds_destroy(&s);
4088 }
4089
4090 COVERAGE_INC(facet_unexpected);
acf60855 4091 dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
a218c879
BP
4092}
4093
abe529af
BP
4094/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
4095 *
4096 * This function also pushes statistics updates to rules which each facet
4097 * resubmits into. Generally these statistics will be accurate. However, if a
4098 * facet changes the rule it resubmits into at some time in between
4099 * update_stats() runs, it is possible that statistics accrued to the
4100 * old rule will be incorrectly attributed to the new rule. This could be
4101 * avoided by calling update_stats() whenever rules are created or
4102 * deleted. However, the performance impact of making so many calls to the
4103 * datapath do not justify the benefit of having perfectly accurate statistics.
4104 */
4105static void
acf60855 4106update_stats(struct dpif_backer *backer)
abe529af
BP
4107{
4108 const struct dpif_flow_stats *stats;
4109 struct dpif_flow_dump dump;
4110 const struct nlattr *key;
4111 size_t key_len;
4112
acf60855 4113 dpif_flow_dump_start(&dump, backer->dpif);
abe529af 4114 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
acf60855 4115 struct flow flow;
b0f7b9b5 4116 struct subfacet *subfacet;
acf60855 4117 struct ofproto_dpif *ofproto;
b9ad7294 4118 struct ofport_dpif *ofport;
acf60855 4119 uint32_t key_hash;
abe529af 4120
58c6adda
EJ
4121 if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
4122 NULL, NULL)) {
acf60855
JP
4123 continue;
4124 }
4125
b9ad7294
EJ
4126 ofport = get_ofp_port(ofproto, flow.in_port);
4127 if (ofport && ofport->tnl_port) {
4128 netdev_vport_inc_rx(ofport->up.netdev, stats);
4129 }
4130
acf60855 4131 key_hash = odp_flow_key_hash(key, key_len);
acf60855 4132 subfacet = subfacet_find(ofproto, key, key_len, key_hash, &flow);
6a7e895f
BP
4133 switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
4134 case SF_FAST_PATH:
a218c879 4135 update_subfacet_stats(subfacet, stats);
6a7e895f
BP
4136 break;
4137
4138 case SF_SLOW_PATH:
4139 /* Stats are updated per-packet. */
4140 break;
4141
4142 case SF_NOT_INSTALLED:
4143 default:
acf60855 4144 delete_unexpected_flow(ofproto, key, key_len);
6a7e895f 4145 break;
abe529af
BP
4146 }
4147 }
4148 dpif_flow_dump_done(&dump);
4149}
4150
4151/* Calculates and returns the number of milliseconds of idle time after which
b0f7b9b5
BP
4152 * subfacets should expire from the datapath. When a subfacet expires, we fold
4153 * its statistics into its facet, and when a facet's last subfacet expires, we
4154 * fold its statistic into its rule. */
abe529af 4155static int
b0f7b9b5 4156subfacet_max_idle(const struct ofproto_dpif *ofproto)
abe529af
BP
4157{
4158 /*
4159 * Idle time histogram.
4160 *
b0f7b9b5
BP
4161 * Most of the time a switch has a relatively small number of subfacets.
4162 * When this is the case we might as well keep statistics for all of them
4163 * in userspace and to cache them in the kernel datapath for performance as
abe529af
BP
4164 * well.
4165 *
b0f7b9b5 4166 * As the number of subfacets increases, the memory required to maintain
abe529af 4167 * statistics about them in userspace and in the kernel becomes
b0f7b9b5
BP
4168 * significant. However, with a large number of subfacets it is likely
4169 * that only a few of them are "heavy hitters" that consume a large amount
4170 * of bandwidth. At this point, only heavy hitters are worth caching in
4171 * the kernel and maintaining in userspaces; other subfacets we can
4172 * discard.
abe529af
BP
4173 *
4174 * The technique used to compute the idle time is to build a histogram with
b0f7b9b5 4175 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
abe529af
BP
4176 * that is installed in the kernel gets dropped in the appropriate bucket.
4177 * After the histogram has been built, we compute the cutoff so that only
b0f7b9b5 4178 * the most-recently-used 1% of subfacets (but at least
084f5290 4179 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
b0f7b9b5
BP
4180 * the most-recently-used bucket of subfacets is kept, so actually an
4181 * arbitrary number of subfacets can be kept in any given expiration run
084f5290
SH
4182 * (though the next run will delete most of those unless they receive
4183 * additional data).
abe529af 4184 *
b0f7b9b5
BP
4185 * This requires a second pass through the subfacets, in addition to the
4186 * pass made by update_stats(), because the former function never looks at
4187 * uninstallable subfacets.
abe529af
BP
4188 */
4189 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
4190 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
4191 int buckets[N_BUCKETS] = { 0 };
f11c1ef4 4192 int total, subtotal, bucket;
b0f7b9b5 4193 struct subfacet *subfacet;
abe529af
BP
4194 long long int now;
4195 int i;
4196
b0f7b9b5 4197 total = hmap_count(&ofproto->subfacets);
084f5290 4198 if (total <= ofproto->up.flow_eviction_threshold) {
abe529af
BP
4199 return N_BUCKETS * BUCKET_WIDTH;
4200 }
4201
4202 /* Build histogram. */
4203 now = time_msec();
b0f7b9b5
BP
4204 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
4205 long long int idle = now - subfacet->used;
abe529af
BP
4206 int bucket = (idle <= 0 ? 0
4207 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
4208 : (unsigned int) idle / BUCKET_WIDTH);
4209 buckets[bucket]++;
4210 }
4211
4212 /* Find the first bucket whose flows should be expired. */
f11c1ef4
SH
4213 subtotal = bucket = 0;
4214 do {
4215 subtotal += buckets[bucket++];
084f5290
SH
4216 } while (bucket < N_BUCKETS &&
4217 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
abe529af
BP
4218
4219 if (VLOG_IS_DBG_ENABLED()) {
4220 struct ds s;
4221
4222 ds_init(&s);
4223 ds_put_cstr(&s, "keep");
4224 for (i = 0; i < N_BUCKETS; i++) {
4225 if (i == bucket) {
4226 ds_put_cstr(&s, ", drop");
4227 }
4228 if (buckets[i]) {
4229 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
4230 }
4231 }
4232 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
4233 ds_destroy(&s);
4234 }
4235
4236 return bucket * BUCKET_WIDTH;
4237}
4238
abe529af 4239static void
b0f7b9b5 4240expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
abe529af 4241{
625b0720
BP
4242 /* Cutoff time for most flows. */
4243 long long int normal_cutoff = time_msec() - dp_max_idle;
4244
4245 /* We really want to keep flows for special protocols around, so use a more
4246 * conservative cutoff. */
4247 long long int special_cutoff = time_msec() - 10000;
b99d3cee 4248
b0f7b9b5 4249 struct subfacet *subfacet, *next_subfacet;
1d85f9e5 4250 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
b99d3cee 4251 int n_batch;
abe529af 4252
b99d3cee 4253 n_batch = 0;
b0f7b9b5
BP
4254 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
4255 &ofproto->subfacets) {
625b0720
BP
4256 long long int cutoff;
4257
4258 cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
4259 ? special_cutoff
4260 : normal_cutoff);
b0f7b9b5 4261 if (subfacet->used < cutoff) {
6a7e895f 4262 if (subfacet->path != SF_NOT_INSTALLED) {
b99d3cee 4263 batch[n_batch++] = subfacet;
1d85f9e5
JP
4264 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
4265 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee
BP
4266 n_batch = 0;
4267 }
4268 } else {
4269 subfacet_destroy(subfacet);
4270 }
abe529af
BP
4271 }
4272 }
b99d3cee
BP
4273
4274 if (n_batch > 0) {
1d85f9e5 4275 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee 4276 }
abe529af
BP
4277}
4278
4279/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
4280 * then delete it entirely. */
4281static void
4282rule_expire(struct rule_dpif *rule)
4283{
abe529af
BP
4284 struct facet *facet, *next_facet;
4285 long long int now;
4286 uint8_t reason;
4287
e2a3d183
BP
4288 if (rule->up.pending) {
4289 /* We'll have to expire it later. */
4290 return;
4291 }
4292
abe529af
BP
4293 /* Has 'rule' expired? */
4294 now = time_msec();
4295 if (rule->up.hard_timeout
308881af 4296 && now > rule->up.modified + rule->up.hard_timeout * 1000) {
abe529af 4297 reason = OFPRR_HARD_TIMEOUT;
8ea6ac3e 4298 } else if (rule->up.idle_timeout
1745cd08 4299 && now > rule->up.used + rule->up.idle_timeout * 1000) {
abe529af
BP
4300 reason = OFPRR_IDLE_TIMEOUT;
4301 } else {
4302 return;
4303 }
4304
4305 COVERAGE_INC(ofproto_dpif_expired);
4306
4307 /* Update stats. (This is a no-op if the rule expired due to an idle
4308 * timeout, because that only happens when the rule has no facets left.) */
4309 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 4310 facet_remove(facet);
abe529af
BP
4311 }
4312
4313 /* Get rid of the rule. */
4314 ofproto_rule_expire(&rule->up, reason);
4315}
4316\f
4317/* Facets. */
4318
f3827897 4319/* Creates and returns a new facet owned by 'rule', given a 'flow'.
abe529af
BP
4320 *
4321 * The caller must already have determined that no facet with an identical
4322 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
f3827897
BP
4323 * the ofproto's classifier table.
4324 *
2b459b83
BP
4325 * 'hash' must be the return value of flow_hash(flow, 0).
4326 *
b0f7b9b5
BP
4327 * The facet will initially have no subfacets. The caller should create (at
4328 * least) one subfacet with subfacet_create(). */
abe529af 4329static struct facet *
2b459b83 4330facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
abe529af
BP
4331{
4332 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4333 struct facet *facet;
4334
4335 facet = xzalloc(sizeof *facet);
4336 facet->used = time_msec();
2b459b83 4337 hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
abe529af
BP
4338 list_push_back(&rule->facets, &facet->list_node);
4339 facet->rule = rule;
4340 facet->flow = *flow;
b0f7b9b5 4341 list_init(&facet->subfacets);
abe529af
BP
4342 netflow_flow_init(&facet->nf_flow);
4343 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
4344
abe529af
BP
4345 return facet;
4346}
4347
4348static void
4349facet_free(struct facet *facet)
4350{
abe529af
BP
4351 free(facet);
4352}
4353
3d9e05f8 4354/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
0a740f48 4355 * 'packet', which arrived on 'in_port'. */
3d9e05f8
BP
4356static bool
4357execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
4358 const struct nlattr *odp_actions, size_t actions_len,
4359 struct ofpbuf *packet)
4360{
4361 struct odputil_keybuf keybuf;
4362 struct ofpbuf key;
4363 int error;
4364
6ff686f2 4365 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
4366 odp_flow_key_from_flow(&key, flow,
4367 ofp_port_to_odp_port(ofproto, flow->in_port));
80e5eed9 4368
acf60855 4369 error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
6ff686f2 4370 odp_actions, actions_len, packet);
6ff686f2 4371 return !error;
abe529af
BP
4372}
4373
abe529af
BP
4374/* Remove 'facet' from 'ofproto' and free up the associated memory:
4375 *
4376 * - If 'facet' was installed in the datapath, uninstalls it and updates its
b0f7b9b5 4377 * rule's statistics, via subfacet_uninstall().
abe529af
BP
4378 *
4379 * - Removes 'facet' from its rule and from ofproto->facets.
4380 */
4381static void
15baa734 4382facet_remove(struct facet *facet)
abe529af 4383{
15baa734 4384 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4385 struct subfacet *subfacet, *next_subfacet;
4386
cb22974d 4387 ovs_assert(!list_is_empty(&facet->subfacets));
551a2f6c
BP
4388
4389 /* First uninstall all of the subfacets to get final statistics. */
4390 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
15baa734 4391 subfacet_uninstall(subfacet);
551a2f6c
BP
4392 }
4393
4394 /* Flush the final stats to the rule.
4395 *
4396 * This might require us to have at least one subfacet around so that we
4397 * can use its actions for accounting in facet_account(), which is why we
4398 * have uninstalled but not yet destroyed the subfacets. */
15baa734 4399 facet_flush_stats(facet);
551a2f6c
BP
4400
4401 /* Now we're really all done so destroy everything. */
b0f7b9b5
BP
4402 LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
4403 &facet->subfacets) {
15baa734 4404 subfacet_destroy__(subfacet);
b0f7b9b5 4405 }
abe529af
BP
4406 hmap_remove(&ofproto->facets, &facet->hmap_node);
4407 list_remove(&facet->list_node);
4408 facet_free(facet);
4409}
4410
3de9590b
BP
4411/* Feed information from 'facet' back into the learning table to keep it in
4412 * sync with what is actually flowing through the datapath. */
abe529af 4413static void
3de9590b 4414facet_learn(struct facet *facet)
abe529af 4415{
15baa734 4416 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3de9590b 4417 struct action_xlate_ctx ctx;
abe529af 4418
3de9590b
BP
4419 if (!facet->has_learn
4420 && !facet->has_normal
4421 && (!facet->has_fin_timeout
4422 || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
abe529af
BP
4423 return;
4424 }
abe529af 4425
3de9590b
BP
4426 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
4427 facet->flow.vlan_tci,
4428 facet->rule, facet->tcp_flags, NULL);
4429 ctx.may_learn = true;
f25d0cf3
BP
4430 xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
4431 facet->rule->up.ofpacts_len);
3de9590b
BP
4432}
4433
4434static void
4435facet_account(struct facet *facet)
4436{
4437 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4438 struct subfacet *subfacet;
4439 const struct nlattr *a;
4440 unsigned int left;
4441 ovs_be16 vlan_tci;
4442 uint64_t n_bytes;
abe529af 4443
75a75043 4444 if (!facet->has_normal || !ofproto->has_bonded_bundles) {
abe529af
BP
4445 return;
4446 }
3de9590b 4447 n_bytes = facet->byte_count - facet->accounted_bytes;
d78be13b
BP
4448
4449 /* This loop feeds byte counters to bond_account() for rebalancing to use
4450 * as a basis. We also need to track the actual VLAN on which the packet
4451 * is going to be sent to ensure that it matches the one passed to
4452 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
b95fc6ba
BP
4453 * hash bucket.)
4454 *
4455 * We use the actions from an arbitrary subfacet because they should all
4456 * be equally valid for our purpose. */
4457 subfacet = CONTAINER_OF(list_front(&facet->subfacets),
4458 struct subfacet, list_node);
d78be13b 4459 vlan_tci = facet->flow.vlan_tci;
b95fc6ba
BP
4460 NL_ATTR_FOR_EACH_UNSAFE (a, left,
4461 subfacet->actions, subfacet->actions_len) {
fea393b1 4462 const struct ovs_action_push_vlan *vlan;
d78be13b 4463 struct ofport_dpif *port;
abe529af 4464
d78be13b 4465 switch (nl_attr_type(a)) {
df2c07f4 4466 case OVS_ACTION_ATTR_OUTPUT:
abe529af
BP
4467 port = get_odp_port(ofproto, nl_attr_get_u32(a));
4468 if (port && port->bundle && port->bundle->bond) {
d78be13b 4469 bond_account(port->bundle->bond, &facet->flow,
dc155bff 4470 vlan_tci_to_vid(vlan_tci), n_bytes);
abe529af 4471 }
d78be13b
BP
4472 break;
4473
fea393b1
BP
4474 case OVS_ACTION_ATTR_POP_VLAN:
4475 vlan_tci = htons(0);
d78be13b
BP
4476 break;
4477
fea393b1
BP
4478 case OVS_ACTION_ATTR_PUSH_VLAN:
4479 vlan = nl_attr_get(a);
4480 vlan_tci = vlan->vlan_tci;
d78be13b 4481 break;
abe529af
BP
4482 }
4483 }
4484}
4485
abe529af
BP
4486/* Returns true if the only action for 'facet' is to send to the controller.
4487 * (We don't report NetFlow expiration messages for such facets because they
4488 * are just part of the control logic for the network, not real traffic). */
4489static bool
4490facet_is_controller_flow(struct facet *facet)
4491{
f25d0cf3
BP
4492 if (facet) {
4493 const struct rule *rule = &facet->rule->up;
4494 const struct ofpact *ofpacts = rule->ofpacts;
4495 size_t ofpacts_len = rule->ofpacts_len;
4496
dd30ff28
BP
4497 if (ofpacts_len > 0 &&
4498 ofpacts->type == OFPACT_CONTROLLER &&
f25d0cf3
BP
4499 ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
4500 return true;
4501 }
4502 }
4503 return false;
abe529af
BP
4504}
4505
4506/* Folds all of 'facet''s statistics into its rule. Also updates the
4507 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
4508 * 'facet''s statistics in the datapath should have been zeroed and folded into
4509 * its packet and byte counts before this function is called. */
4510static void
15baa734 4511facet_flush_stats(struct facet *facet)
abe529af 4512{
15baa734 4513 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4514 struct subfacet *subfacet;
4515
4516 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
cb22974d
BP
4517 ovs_assert(!subfacet->dp_byte_count);
4518 ovs_assert(!subfacet->dp_packet_count);
b0f7b9b5 4519 }
abe529af
BP
4520
4521 facet_push_stats(facet);
3de9590b
BP
4522 if (facet->accounted_bytes < facet->byte_count) {
4523 facet_account(facet);
4524 facet->accounted_bytes = facet->byte_count;
4525 }
abe529af
BP
4526
4527 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
4528 struct ofexpired expired;
4529 expired.flow = facet->flow;
4530 expired.packet_count = facet->packet_count;
4531 expired.byte_count = facet->byte_count;
4532 expired.used = facet->used;
4533 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
4534 }
4535
4536 facet->rule->packet_count += facet->packet_count;
4537 facet->rule->byte_count += facet->byte_count;
4538
4539 /* Reset counters to prevent double counting if 'facet' ever gets
4540 * reinstalled. */
bbb5d219 4541 facet_reset_counters(facet);
abe529af
BP
4542
4543 netflow_flow_clear(&facet->nf_flow);
0e553d9c 4544 facet->tcp_flags = 0;
abe529af
BP
4545}
4546
4547/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4548 * Returns it if found, otherwise a null pointer.
4549 *
2b459b83
BP
4550 * 'hash' must be the return value of flow_hash(flow, 0).
4551 *
abe529af
BP
4552 * The returned facet might need revalidation; use facet_lookup_valid()
4553 * instead if that is important. */
4554static struct facet *
2b459b83
BP
4555facet_find(struct ofproto_dpif *ofproto,
4556 const struct flow *flow, uint32_t hash)
abe529af
BP
4557{
4558 struct facet *facet;
4559
2b459b83 4560 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
abe529af
BP
4561 if (flow_equal(flow, &facet->flow)) {
4562 return facet;
4563 }
4564 }
4565
4566 return NULL;
4567}
4568
4569/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4570 * Returns it if found, otherwise a null pointer.
4571 *
2b459b83
BP
4572 * 'hash' must be the return value of flow_hash(flow, 0).
4573 *
abe529af
BP
4574 * The returned facet is guaranteed to be valid. */
4575static struct facet *
2b459b83
BP
4576facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
4577 uint32_t hash)
abe529af 4578{
c57b2226 4579 struct facet *facet;
abe529af 4580
c57b2226 4581 facet = facet_find(ofproto, flow, hash);
abe529af 4582 if (facet
2cc3c58e
EJ
4583 && (ofproto->backer->need_revalidate
4584 || tag_set_intersects(&ofproto->backer->revalidate_set,
4585 facet->tags))) {
c57b2226 4586 facet_revalidate(facet);
abe529af
BP
4587 }
4588
4589 return facet;
4590}
4591
6a7e895f
BP
4592static const char *
4593subfacet_path_to_string(enum subfacet_path path)
4594{
4595 switch (path) {
4596 case SF_NOT_INSTALLED:
4597 return "not installed";
4598 case SF_FAST_PATH:
4599 return "in fast path";
4600 case SF_SLOW_PATH:
4601 return "in slow path";
4602 default:
4603 return "<error>";
4604 }
4605}
4606
4607/* Returns the path in which a subfacet should be installed if its 'slow'
4608 * member has the specified value. */
4609static enum subfacet_path
4610subfacet_want_path(enum slow_path_reason slow)
4611{
4612 return slow ? SF_SLOW_PATH : SF_FAST_PATH;
4613}
4614
4615/* Returns true if 'subfacet' needs to have its datapath flow updated,
4616 * supposing that its actions have been recalculated as 'want_actions' and that
4617 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
4618static bool
4619subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
4620 const struct ofpbuf *want_actions)
4621{
4622 enum subfacet_path want_path = subfacet_want_path(slow);
4623 return (want_path != subfacet->path
4624 || (want_path == SF_FAST_PATH
4625 && (subfacet->actions_len != want_actions->size
4626 || memcmp(subfacet->actions, want_actions->data,
4627 subfacet->actions_len))));
4628}
4629
6814e51f
BP
4630static bool
4631facet_check_consistency(struct facet *facet)
4632{
4633 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
4634
4635 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4636
050ac423
BP
4637 uint64_t odp_actions_stub[1024 / 8];
4638 struct ofpbuf odp_actions;
4639
6814e51f
BP
4640 struct rule_dpif *rule;
4641 struct subfacet *subfacet;
c53e1132 4642 bool may_log = false;
6814e51f
BP
4643 bool ok;
4644
4645 /* Check the rule for consistency. */
c57b2226
BP
4646 rule = rule_dpif_lookup(ofproto, &facet->flow);
4647 ok = rule == facet->rule;
4648 if (!ok) {
c53e1132 4649 may_log = !VLOG_DROP_WARN(&rl);
c53e1132
BP
4650 if (may_log) {
4651 struct ds s;
6814e51f 4652
c53e1132
BP
4653 ds_init(&s);
4654 flow_format(&s, &facet->flow);
4655 ds_put_format(&s, ": facet associated with wrong rule (was "
4656 "table=%"PRIu8",", facet->rule->up.table_id);
4657 cls_rule_format(&facet->rule->up.cr, &s);
4658 ds_put_format(&s, ") (should have been table=%"PRIu8",",
4659 rule->up.table_id);
4660 cls_rule_format(&rule->up.cr, &s);
4661 ds_put_char(&s, ')');
6814e51f 4662
c53e1132
BP
4663 VLOG_WARN("%s", ds_cstr(&s));
4664 ds_destroy(&s);
4665 }
6814e51f
BP
4666 }
4667
4668 /* Check the datapath actions for consistency. */
050ac423 4669 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
6814e51f 4670 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4671 enum subfacet_path want_path;
9616614b 4672 struct odputil_keybuf keybuf;
6814e51f 4673 struct action_xlate_ctx ctx;
9616614b
BP
4674 struct ofpbuf key;
4675 struct ds s;
6814e51f
BP
4676
4677 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 4678 subfacet->initial_tci, rule, 0, NULL);
f25d0cf3 4679 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 4680 &odp_actions);
6814e51f 4681
6a7e895f
BP
4682 if (subfacet->path == SF_NOT_INSTALLED) {
4683 /* This only happens if the datapath reported an error when we
4684 * tried to install the flow. Don't flag another error here. */
4685 continue;
4686 }
4687
4688 want_path = subfacet_want_path(subfacet->slow);
4689 if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
4690 /* The actions for slow-path flows may legitimately vary from one
4691 * packet to the next. We're done. */
050ac423 4692 continue;
6814e51f
BP
4693 }
4694
6a7e895f 4695 if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
9616614b
BP
4696 continue;
4697 }
c53e1132 4698
9616614b
BP
4699 /* Inconsistency! */
4700 if (ok) {
4701 may_log = !VLOG_DROP_WARN(&rl);
4702 ok = false;
4703 }
4704 if (!may_log) {
4705 /* Rate-limited, skip reporting. */
4706 continue;
4707 }
c53e1132 4708
9616614b
BP
4709 ds_init(&s);
4710 subfacet_get_key(subfacet, &keybuf, &key);
4711 odp_flow_key_format(key.data, key.size, &s);
4712
4713 ds_put_cstr(&s, ": inconsistency in subfacet");
6a7e895f 4714 if (want_path != subfacet->path) {
9616614b
BP
4715 enum odp_key_fitness fitness = subfacet->key_fitness;
4716
6a7e895f
BP
4717 ds_put_format(&s, " (%s, fitness=%s)",
4718 subfacet_path_to_string(subfacet->path),
9616614b 4719 odp_key_fitness_to_string(fitness));
6a7e895f
BP
4720 ds_put_format(&s, " (should have been %s)",
4721 subfacet_path_to_string(want_path));
4722 } else if (want_path == SF_FAST_PATH) {
9616614b
BP
4723 ds_put_cstr(&s, " (actions were: ");
4724 format_odp_actions(&s, subfacet->actions,
4725 subfacet->actions_len);
4726 ds_put_cstr(&s, ") (correct actions: ");
4727 format_odp_actions(&s, odp_actions.data, odp_actions.size);
4728 ds_put_char(&s, ')');
4729 } else {
4730 ds_put_cstr(&s, " (actions: ");
4731 format_odp_actions(&s, subfacet->actions,
4732 subfacet->actions_len);
4733 ds_put_char(&s, ')');
6814e51f 4734 }
9616614b
BP
4735 VLOG_WARN("%s", ds_cstr(&s));
4736 ds_destroy(&s);
6814e51f 4737 }
050ac423 4738 ofpbuf_uninit(&odp_actions);
6814e51f
BP
4739
4740 return ok;
4741}
4742
15baa734 4743/* Re-searches the classifier for 'facet':
abe529af
BP
4744 *
4745 * - If the rule found is different from 'facet''s current rule, moves
4746 * 'facet' to the new rule and recompiles its actions.
4747 *
4748 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
c57b2226
BP
4749 * where it is and recompiles its actions anyway. */
4750static void
15baa734 4751facet_revalidate(struct facet *facet)
abe529af 4752{
15baa734 4753 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b95fc6ba
BP
4754 struct actions {
4755 struct nlattr *odp_actions;
4756 size_t actions_len;
4757 };
4758 struct actions *new_actions;
4759
abe529af 4760 struct action_xlate_ctx ctx;
050ac423
BP
4761 uint64_t odp_actions_stub[1024 / 8];
4762 struct ofpbuf odp_actions;
4763
abe529af 4764 struct rule_dpif *new_rule;
b0f7b9b5 4765 struct subfacet *subfacet;
b95fc6ba 4766 int i;
abe529af
BP
4767
4768 COVERAGE_INC(facet_revalidate);
4769
c57b2226 4770 new_rule = rule_dpif_lookup(ofproto, &facet->flow);
abe529af 4771
df2c07f4 4772 /* Calculate new datapath actions.
abe529af
BP
4773 *
4774 * We do not modify any 'facet' state yet, because we might need to, e.g.,
4775 * emit a NetFlow expiration and, if so, we need to have the old state
4776 * around to properly compose it. */
abe529af 4777
df2c07f4
JP
4778 /* If the datapath actions changed or the installability changed,
4779 * then we need to talk to the datapath. */
b95fc6ba
BP
4780 i = 0;
4781 new_actions = NULL;
4782 memset(&ctx, 0, sizeof ctx);
050ac423 4783 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
b0f7b9b5 4784 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4785 enum slow_path_reason slow;
b95fc6ba 4786
e84173dc 4787 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 4788 subfacet->initial_tci, new_rule, 0, NULL);
f25d0cf3 4789 xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
050ac423 4790 &odp_actions);
b0f7b9b5 4791
6a7e895f
BP
4792 slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
4793 if (subfacet_should_install(subfacet, slow, &odp_actions)) {
4794 struct dpif_flow_stats stats;
4795
4796 subfacet_install(subfacet,
4797 odp_actions.data, odp_actions.size, &stats, slow);
4798 subfacet_update_stats(subfacet, &stats);
b95fc6ba
BP
4799
4800 if (!new_actions) {
4801 new_actions = xcalloc(list_size(&facet->subfacets),
4802 sizeof *new_actions);
4803 }
050ac423
BP
4804 new_actions[i].odp_actions = xmemdup(odp_actions.data,
4805 odp_actions.size);
4806 new_actions[i].actions_len = odp_actions.size;
abe529af 4807 }
b95fc6ba 4808
b95fc6ba 4809 i++;
b0f7b9b5 4810 }
050ac423
BP
4811 ofpbuf_uninit(&odp_actions);
4812
b95fc6ba 4813 if (new_actions) {
15baa734 4814 facet_flush_stats(facet);
abe529af
BP
4815 }
4816
4817 /* Update 'facet' now that we've taken care of all the old state. */
4818 facet->tags = ctx.tags;
4819 facet->nf_flow.output_iface = ctx.nf_output_iface;
75a75043
BP
4820 facet->has_learn = ctx.has_learn;
4821 facet->has_normal = ctx.has_normal;
0e553d9c 4822 facet->has_fin_timeout = ctx.has_fin_timeout;
9d24de3b 4823 facet->mirrors = ctx.mirrors;
6a7e895f
BP
4824
4825 i = 0;
4826 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4827 subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
4828
4829 if (new_actions && new_actions[i].odp_actions) {
4830 free(subfacet->actions);
4831 subfacet->actions = new_actions[i].odp_actions;
4832 subfacet->actions_len = new_actions[i].actions_len;
b95fc6ba 4833 }
6a7e895f 4834 i++;
abe529af 4835 }
6a7e895f
BP
4836 free(new_actions);
4837
abe529af
BP
4838 if (facet->rule != new_rule) {
4839 COVERAGE_INC(facet_changed_rule);
4840 list_remove(&facet->list_node);
4841 list_push_back(&new_rule->facets, &facet->list_node);
4842 facet->rule = new_rule;
4843 facet->used = new_rule->up.created;
9d24de3b 4844 facet->prev_used = facet->used;
abe529af 4845 }
abe529af
BP
4846}
4847
4848/* Updates 'facet''s used time. Caller is responsible for calling
4849 * facet_push_stats() to update the flows which 'facet' resubmits into. */
4850static void
15baa734 4851facet_update_time(struct facet *facet, long long int used)
abe529af 4852{
15baa734 4853 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
abe529af
BP
4854 if (used > facet->used) {
4855 facet->used = used;
1745cd08 4856 ofproto_rule_update_used(&facet->rule->up, used);
abe529af
BP
4857 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
4858 }
4859}
4860
bbb5d219
EJ
4861static void
4862facet_reset_counters(struct facet *facet)
4863{
4864 facet->packet_count = 0;
4865 facet->byte_count = 0;
9d24de3b
JP
4866 facet->prev_packet_count = 0;
4867 facet->prev_byte_count = 0;
bbb5d219
EJ
4868 facet->accounted_bytes = 0;
4869}
4870
abe529af
BP
4871static void
4872facet_push_stats(struct facet *facet)
4873{
112bc5f4 4874 struct dpif_flow_stats stats;
abe529af 4875
cb22974d
BP
4876 ovs_assert(facet->packet_count >= facet->prev_packet_count);
4877 ovs_assert(facet->byte_count >= facet->prev_byte_count);
4878 ovs_assert(facet->used >= facet->prev_used);
abe529af 4879
112bc5f4
BP
4880 stats.n_packets = facet->packet_count - facet->prev_packet_count;
4881 stats.n_bytes = facet->byte_count - facet->prev_byte_count;
4882 stats.used = facet->used;
4883 stats.tcp_flags = 0;
abe529af 4884
112bc5f4 4885 if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
9d24de3b
JP
4886 facet->prev_packet_count = facet->packet_count;
4887 facet->prev_byte_count = facet->byte_count;
4888 facet->prev_used = facet->used;
abe529af 4889
112bc5f4 4890 flow_push_stats(facet->rule, &facet->flow, &stats);
9d24de3b
JP
4891
4892 update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
112bc5f4 4893 facet->mirrors, stats.n_packets, stats.n_bytes);
abe529af
BP
4894 }
4895}
4896
abe529af 4897static void
112bc5f4 4898rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
abe529af 4899{
112bc5f4
BP
4900 rule->packet_count += stats->n_packets;
4901 rule->byte_count += stats->n_bytes;
4902 ofproto_rule_update_used(&rule->up, stats->used);
abe529af
BP
4903}
4904
4905/* Pushes flow statistics to the rules which 'flow' resubmits into given
9d24de3b 4906 * 'rule''s actions and mirrors. */
abe529af 4907static void
18b2a258 4908flow_push_stats(struct rule_dpif *rule,
112bc5f4 4909 const struct flow *flow, const struct dpif_flow_stats *stats)
abe529af
BP
4910{
4911 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
112bc5f4 4912 struct action_xlate_ctx ctx;
abe529af 4913
112bc5f4 4914 ofproto_rule_update_used(&rule->up, stats->used);
f3b50afb 4915
112bc5f4 4916 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
0e553d9c 4917 0, NULL);
112bc5f4 4918 ctx.resubmit_stats = stats;
f25d0cf3
BP
4919 xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
4920 rule->up.ofpacts_len);
abe529af
BP
4921}
4922\f
b0f7b9b5
BP
4923/* Subfacets. */
4924
4925static struct subfacet *
acf60855
JP
4926subfacet_find(struct ofproto_dpif *ofproto,
4927 const struct nlattr *key, size_t key_len, uint32_t key_hash,
4928 const struct flow *flow)
b0f7b9b5
BP
4929{
4930 struct subfacet *subfacet;
4931
4932 HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
4933 &ofproto->subfacets) {
4934 if (subfacet->key
4935 ? (subfacet->key_len == key_len
4936 && !memcmp(key, subfacet->key, key_len))
4937 : flow_equal(flow, &subfacet->facet->flow)) {
4938 return subfacet;
4939 }
4940 }
4941
4942 return NULL;
4943}
4944
4945/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
a088a1ff
JP
4946 * 'key_fitness', 'key', and 'key_len' members in 'miss'. Returns the
4947 * existing subfacet if there is one, otherwise creates and returns a
4948 * new subfacet.
b95fc6ba
BP
4949 *
4950 * If the returned subfacet is new, then subfacet->actions will be NULL, in
4951 * which case the caller must populate the actions with
4952 * subfacet_make_actions(). */
b0f7b9b5 4953static struct subfacet *
a088a1ff
JP
4954subfacet_create(struct facet *facet, struct flow_miss *miss,
4955 long long int now)
b0f7b9b5 4956{
15baa734 4957 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
a088a1ff
JP
4958 enum odp_key_fitness key_fitness = miss->key_fitness;
4959 const struct nlattr *key = miss->key;
4960 size_t key_len = miss->key_len;
4961 uint32_t key_hash;
b0f7b9b5
BP
4962 struct subfacet *subfacet;
4963
a088a1ff
JP
4964 key_hash = odp_flow_key_hash(key, key_len);
4965
3b145dd7
BP
4966 if (list_is_empty(&facet->subfacets)) {
4967 subfacet = &facet->one_subfacet;
4968 } else {
acf60855
JP
4969 subfacet = subfacet_find(ofproto, key, key_len, key_hash,
4970 &facet->flow);
3b145dd7
BP
4971 if (subfacet) {
4972 if (subfacet->facet == facet) {
4973 return subfacet;
4974 }
4975
4976 /* This shouldn't happen. */
4977 VLOG_ERR_RL(&rl, "subfacet with wrong facet");
4978 subfacet_destroy(subfacet);
b0f7b9b5
BP
4979 }
4980
3b145dd7 4981 subfacet = xmalloc(sizeof *subfacet);
b0f7b9b5
BP
4982 }
4983
b0f7b9b5
BP
4984 hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
4985 list_push_back(&facet->subfacets, &subfacet->list_node);
4986 subfacet->facet = facet;
b0f7b9b5
BP
4987 subfacet->key_fitness = key_fitness;
4988 if (key_fitness != ODP_FIT_PERFECT) {
4989 subfacet->key = xmemdup(key, key_len);
4990 subfacet->key_len = key_len;
26cd7e34
BP
4991 } else {
4992 subfacet->key = NULL;
4993 subfacet->key_len = 0;
b0f7b9b5 4994 }
459b16a1 4995 subfacet->used = now;
26cd7e34
BP
4996 subfacet->dp_packet_count = 0;
4997 subfacet->dp_byte_count = 0;
4998 subfacet->actions_len = 0;
4999 subfacet->actions = NULL;
6a7e895f
BP
5000 subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
5001 ? SLOW_MATCH
5002 : 0);
5003 subfacet->path = SF_NOT_INSTALLED;
a088a1ff
JP
5004 subfacet->initial_tci = miss->initial_tci;
5005 subfacet->odp_in_port = miss->odp_in_port;
b0f7b9b5
BP
5006
5007 return subfacet;
5008}
5009
b0f7b9b5
BP
5010/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
5011 * its facet within 'ofproto', and frees it. */
5012static void
15baa734 5013subfacet_destroy__(struct subfacet *subfacet)
b0f7b9b5 5014{
15baa734
BP
5015 struct facet *facet = subfacet->facet;
5016 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
5017
5018 subfacet_uninstall(subfacet);
b0f7b9b5
BP
5019 hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
5020 list_remove(&subfacet->list_node);
5021 free(subfacet->key);
b95fc6ba 5022 free(subfacet->actions);
26cd7e34
BP
5023 if (subfacet != &facet->one_subfacet) {
5024 free(subfacet);
5025 }
b0f7b9b5
BP
5026}
5027
5028/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
5029 * last remaining subfacet in its facet destroys the facet too. */
5030static void
15baa734 5031subfacet_destroy(struct subfacet *subfacet)
b0f7b9b5
BP
5032{
5033 struct facet *facet = subfacet->facet;
5034
551a2f6c
BP
5035 if (list_is_singleton(&facet->subfacets)) {
5036 /* facet_remove() needs at least one subfacet (it will remove it). */
15baa734 5037 facet_remove(facet);
551a2f6c 5038 } else {
15baa734 5039 subfacet_destroy__(subfacet);
b0f7b9b5
BP
5040 }
5041}
5042
1d85f9e5
JP
5043static void
5044subfacet_destroy_batch(struct ofproto_dpif *ofproto,
5045 struct subfacet **subfacets, int n)
5046{
5047 struct odputil_keybuf keybufs[SUBFACET_DESTROY_MAX_BATCH];
5048 struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
5049 struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
5050 struct ofpbuf keys[SUBFACET_DESTROY_MAX_BATCH];
5051 struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
5052 int i;
5053
5054 for (i = 0; i < n; i++) {
5055 ops[i].type = DPIF_OP_FLOW_DEL;
5056 subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
5057 ops[i].u.flow_del.key = keys[i].data;
5058 ops[i].u.flow_del.key_len = keys[i].size;
5059 ops[i].u.flow_del.stats = &stats[i];
5060 opsp[i] = &ops[i];
5061 }
5062
acf60855 5063 dpif_operate(ofproto->backer->dpif, opsp, n);
1d85f9e5
JP
5064 for (i = 0; i < n; i++) {
5065 subfacet_reset_dp_stats(subfacets[i], &stats[i]);
5066 subfacets[i]->path = SF_NOT_INSTALLED;
5067 subfacet_destroy(subfacets[i]);
5068 }
5069}
5070
b0f7b9b5
BP
5071/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
5072 * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
5073 * for use as temporary storage. */
5074static void
5075subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
5076 struct ofpbuf *key)
5077{
e1b1d06a 5078
b0f7b9b5 5079 if (!subfacet->key) {
ddbfda84
JP
5080 struct flow *flow = &subfacet->facet->flow;
5081
b0f7b9b5 5082 ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
a088a1ff 5083 odp_flow_key_from_flow(key, flow, subfacet->odp_in_port);
b0f7b9b5
BP
5084 } else {
5085 ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
5086 }
5087}
5088
5fe20d5d
BP
5089/* Composes the datapath actions for 'subfacet' based on its rule's actions.
5090 * Translates the actions into 'odp_actions', which the caller must have
5091 * initialized and is responsible for uninitializing. */
b95fc6ba 5092static void
5fe20d5d
BP
5093subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
5094 struct ofpbuf *odp_actions)
b95fc6ba
BP
5095{
5096 struct facet *facet = subfacet->facet;
18b2a258 5097 struct rule_dpif *rule = facet->rule;
15baa734 5098 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
050ac423 5099
b95fc6ba
BP
5100 struct action_xlate_ctx ctx;
5101
15baa734 5102 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
0e553d9c 5103 rule, 0, packet);
f25d0cf3 5104 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
b95fc6ba 5105 facet->tags = ctx.tags;
b95fc6ba
BP
5106 facet->has_learn = ctx.has_learn;
5107 facet->has_normal = ctx.has_normal;
0e553d9c 5108 facet->has_fin_timeout = ctx.has_fin_timeout;
b95fc6ba 5109 facet->nf_flow.output_iface = ctx.nf_output_iface;
9d24de3b 5110 facet->mirrors = ctx.mirrors;
b95fc6ba 5111
6a7e895f 5112 subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
5fe20d5d
BP
5113 if (subfacet->actions_len != odp_actions->size
5114 || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
b95fc6ba 5115 free(subfacet->actions);
5fe20d5d
BP
5116 subfacet->actions_len = odp_actions->size;
5117 subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
b95fc6ba 5118 }
b95fc6ba
BP
5119}
5120
b0f7b9b5
BP
5121/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
5122 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
5123 * in the datapath will be zeroed and 'stats' will be updated with traffic new
5124 * since 'subfacet' was last updated.
5125 *
5126 * Returns 0 if successful, otherwise a positive errno value. */
5127static int
15baa734 5128subfacet_install(struct subfacet *subfacet,
b0f7b9b5 5129 const struct nlattr *actions, size_t actions_len,
6a7e895f
BP
5130 struct dpif_flow_stats *stats,
5131 enum slow_path_reason slow)
b0f7b9b5 5132{
15baa734
BP
5133 struct facet *facet = subfacet->facet;
5134 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
6a7e895f
BP
5135 enum subfacet_path path = subfacet_want_path(slow);
5136 uint64_t slow_path_stub[128 / 8];
b0f7b9b5
BP
5137 struct odputil_keybuf keybuf;
5138 enum dpif_flow_put_flags flags;
5139 struct ofpbuf key;
5140 int ret;
5141
5142 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
5143 if (stats) {
5144 flags |= DPIF_FP_ZERO_STATS;
5145 }
5146
6a7e895f
BP
5147 if (path == SF_SLOW_PATH) {
5148 compose_slow_path(ofproto, &facet->flow, slow,
5149 slow_path_stub, sizeof slow_path_stub,
5150 &actions, &actions_len);
5151 }
5152
b0f7b9b5 5153 subfacet_get_key(subfacet, &keybuf, &key);
acf60855 5154 ret = dpif_flow_put(ofproto->backer->dpif, flags, key.data, key.size,
b0f7b9b5
BP
5155 actions, actions_len, stats);
5156
5157 if (stats) {
5158 subfacet_reset_dp_stats(subfacet, stats);
5159 }
5160
6a7e895f
BP
5161 if (!ret) {
5162 subfacet->path = path;
5163 }
b0f7b9b5
BP
5164 return ret;
5165}
5166
6a7e895f
BP
5167static int
5168subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
5169{
5170 return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
5171 stats, subfacet->slow);
5172}
5173
b0f7b9b5
BP
5174/* If 'subfacet' is installed in the datapath, uninstalls it. */
5175static void
15baa734 5176subfacet_uninstall(struct subfacet *subfacet)
b0f7b9b5 5177{
6a7e895f 5178 if (subfacet->path != SF_NOT_INSTALLED) {
15baa734
BP
5179 struct rule_dpif *rule = subfacet->facet->rule;
5180 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
b0f7b9b5
BP
5181 struct odputil_keybuf keybuf;
5182 struct dpif_flow_stats stats;
5183 struct ofpbuf key;
5184 int error;
5185
5186 subfacet_get_key(subfacet, &keybuf, &key);
acf60855
JP
5187 error = dpif_flow_del(ofproto->backer->dpif,
5188 key.data, key.size, &stats);
b0f7b9b5
BP
5189 subfacet_reset_dp_stats(subfacet, &stats);
5190 if (!error) {
15baa734 5191 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 5192 }
6a7e895f 5193 subfacet->path = SF_NOT_INSTALLED;
b0f7b9b5 5194 } else {
cb22974d
BP
5195 ovs_assert(subfacet->dp_packet_count == 0);
5196 ovs_assert(subfacet->dp_byte_count == 0);
b0f7b9b5
BP
5197 }
5198}
5199
5200/* Resets 'subfacet''s datapath statistics counters. This should be called
5201 * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is
5202 * non-null, it should contain the statistics returned by dpif when 'subfacet'
5203 * was reset in the datapath. 'stats' will be modified to include only
5204 * statistics new since 'subfacet' was last updated. */
5205static void
5206subfacet_reset_dp_stats(struct subfacet *subfacet,
5207 struct dpif_flow_stats *stats)
5208{
5209 if (stats
5210 && subfacet->dp_packet_count <= stats->n_packets
5211 && subfacet->dp_byte_count <= stats->n_bytes) {
5212 stats->n_packets -= subfacet->dp_packet_count;
5213 stats->n_bytes -= subfacet->dp_byte_count;
5214 }
5215
5216 subfacet->dp_packet_count = 0;
5217 subfacet->dp_byte_count = 0;
5218}
5219
5220/* Updates 'subfacet''s used time. The caller is responsible for calling
5221 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
5222static void
15baa734 5223subfacet_update_time(struct subfacet *subfacet, long long int used)
b0f7b9b5
BP
5224{
5225 if (used > subfacet->used) {
5226 subfacet->used = used;
15baa734 5227 facet_update_time(subfacet->facet, used);
b0f7b9b5
BP
5228 }
5229}
5230
5231/* Folds the statistics from 'stats' into the counters in 'subfacet'.
5232 *
5233 * Because of the meaning of a subfacet's counters, it only makes sense to do
5234 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
5235 * represents a packet that was sent by hand or if it represents statistics
5236 * that have been cleared out of the datapath. */
5237static void
15baa734 5238subfacet_update_stats(struct subfacet *subfacet,
b0f7b9b5
BP
5239 const struct dpif_flow_stats *stats)
5240{
5241 if (stats->n_packets || stats->used > subfacet->used) {
5242 struct facet *facet = subfacet->facet;
5243
15baa734 5244 subfacet_update_time(subfacet, stats->used);
b0f7b9b5
BP
5245 facet->packet_count += stats->n_packets;
5246 facet->byte_count += stats->n_bytes;
0e553d9c 5247 facet->tcp_flags |= stats->tcp_flags;
b0f7b9b5
BP
5248 facet_push_stats(facet);
5249 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
5250 }
5251}
5252\f
abe529af
BP
5253/* Rules. */
5254
5255static struct rule_dpif *
c57b2226
BP
5256rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
5257{
c57b2226
BP
5258 struct rule_dpif *rule;
5259
5260 rule = rule_dpif_lookup__(ofproto, flow, 0);
5261 if (rule) {
5262 return rule;
5263 }
5264
c376f9a3 5265 return rule_dpif_miss_rule(ofproto, flow);
c57b2226
BP
5266}
5267
5268static struct rule_dpif *
5269rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
5270 uint8_t table_id)
abe529af 5271{
7257b535
BP
5272 struct cls_rule *cls_rule;
5273 struct classifier *cls;
5274
9cdaaebe
BP
5275 if (table_id >= N_TABLES) {
5276 return NULL;
5277 }
5278
d0918789 5279 cls = &ofproto->up.tables[table_id].cls;
eadef313 5280 if (flow->nw_frag & FLOW_NW_FRAG_ANY
7257b535
BP
5281 && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
5282 /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
5283 * are unavailable. */
5284 struct flow ofpc_normal_flow = *flow;
5285 ofpc_normal_flow.tp_src = htons(0);
5286 ofpc_normal_flow.tp_dst = htons(0);
5287 cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
5288 } else {
5289 cls_rule = classifier_lookup(cls, flow);
5290 }
5291 return rule_dpif_cast(rule_from_cls_rule(cls_rule));
abe529af
BP
5292}
5293
c376f9a3
IY
5294static struct rule_dpif *
5295rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
5296{
5297 struct ofport_dpif *port;
5298
5299 port = get_ofp_port(ofproto, flow->in_port);
5300 if (!port) {
5301 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
5302 return ofproto->miss_rule;
5303 }
5304
5305 if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
5306 return ofproto->no_packet_in_rule;
5307 }
5308 return ofproto->miss_rule;
5309}
5310
7ee20df1
BP
5311static void
5312complete_operation(struct rule_dpif *rule)
5313{
5314 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
5315
54a9cbc9 5316 rule_invalidate(rule);
7ee20df1
BP
5317 if (clogged) {
5318 struct dpif_completion *c = xmalloc(sizeof *c);
5319 c->op = rule->up.pending;
5320 list_push_back(&ofproto->completions, &c->list_node);
5321 } else {
5322 ofoperation_complete(rule->up.pending, 0);
5323 }
5324}
5325
abe529af
BP
5326static struct rule *
5327rule_alloc(void)
5328{
5329 struct rule_dpif *rule = xmalloc(sizeof *rule);
5330 return &rule->up;
5331}
5332
5333static void
5334rule_dealloc(struct rule *rule_)
5335{
5336 struct rule_dpif *rule = rule_dpif_cast(rule_);
5337 free(rule);
5338}
5339
90bf1e07 5340static enum ofperr
abe529af
BP
5341rule_construct(struct rule *rule_)
5342{
5343 struct rule_dpif *rule = rule_dpif_cast(rule_);
5344 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7ee20df1 5345 struct rule_dpif *victim;
54a9cbc9 5346 uint8_t table_id;
abe529af 5347
abe529af
BP
5348 rule->packet_count = 0;
5349 rule->byte_count = 0;
abe529af 5350
7ee20df1
BP
5351 victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
5352 if (victim && !list_is_empty(&victim->facets)) {
5353 struct facet *facet;
5354
5355 rule->facets = victim->facets;
5356 list_moved(&rule->facets);
5357 LIST_FOR_EACH (facet, list_node, &rule->facets) {
bbb5d219
EJ
5358 /* XXX: We're only clearing our local counters here. It's possible
5359 * that quite a few packets are unaccounted for in the datapath
5360 * statistics. These will be accounted to the new rule instead of
5361 * cleared as required. This could be fixed by clearing out the
5362 * datapath statistics for this facet, but currently it doesn't
5363 * seem worth it. */
5364 facet_reset_counters(facet);
7ee20df1
BP
5365 facet->rule = rule;
5366 }
5367 } else {
5368 /* Must avoid list_moved() in this case. */
5369 list_init(&rule->facets);
5370 }
abe529af 5371
54a9cbc9 5372 table_id = rule->up.table_id;
5cb7a798
BP
5373 if (victim) {
5374 rule->tag = victim->tag;
5375 } else if (table_id == 0) {
5376 rule->tag = 0;
5377 } else {
5378 struct flow flow;
5379
5380 miniflow_expand(&rule->up.cr.match.flow, &flow);
5381 rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
5382 ofproto->tables[table_id].basis);
5383 }
54a9cbc9 5384
7ee20df1 5385 complete_operation(rule);
abe529af
BP
5386 return 0;
5387}
5388
5389static void
5390rule_destruct(struct rule *rule_)
5391{
5392 struct rule_dpif *rule = rule_dpif_cast(rule_);
abe529af
BP
5393 struct facet *facet, *next_facet;
5394
abe529af 5395 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 5396 facet_revalidate(facet);
abe529af 5397 }
7ee20df1
BP
5398
5399 complete_operation(rule);
abe529af
BP
5400}
5401
5402static void
5403rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
5404{
5405 struct rule_dpif *rule = rule_dpif_cast(rule_);
5406 struct facet *facet;
5407
5408 /* Start from historical data for 'rule' itself that are no longer tracked
5409 * in facets. This counts, for example, facets that have expired. */
5410 *packets = rule->packet_count;
5411 *bytes = rule->byte_count;
5412
5413 /* Add any statistics that are tracked by facets. This includes
5414 * statistical data recently updated by ofproto_update_stats() as well as
5415 * stats for packets that were executed "by hand" via dpif_execute(). */
5416 LIST_FOR_EACH (facet, list_node, &rule->facets) {
5417 *packets += facet->packet_count;
5418 *bytes += facet->byte_count;
5419 }
5420}
5421
0a740f48
EJ
5422static void
5423rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
5424 struct ofpbuf *packet)
abe529af 5425{
abe529af 5426 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
050ac423 5427
112bc5f4 5428 struct dpif_flow_stats stats;
050ac423 5429
abe529af 5430 struct action_xlate_ctx ctx;
050ac423
BP
5431 uint64_t odp_actions_stub[1024 / 8];
5432 struct ofpbuf odp_actions;
abe529af 5433
a7752d4a 5434 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
112bc5f4
BP
5435 rule_credit_stats(rule, &stats);
5436
050ac423 5437 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
54834960 5438 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
112bc5f4
BP
5439 rule, stats.tcp_flags, packet);
5440 ctx.resubmit_stats = &stats;
f25d0cf3 5441 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
112bc5f4
BP
5442
5443 execute_odp_actions(ofproto, flow, odp_actions.data,
5444 odp_actions.size, packet);
5445
050ac423 5446 ofpbuf_uninit(&odp_actions);
0a740f48 5447}
5bf0e941 5448
0a740f48
EJ
5449static enum ofperr
5450rule_execute(struct rule *rule, const struct flow *flow,
5451 struct ofpbuf *packet)
5452{
5453 rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
5454 ofpbuf_delete(packet);
5bf0e941 5455 return 0;
abe529af
BP
5456}
5457
7ee20df1
BP
5458static void
5459rule_modify_actions(struct rule *rule_)
abe529af
BP
5460{
5461 struct rule_dpif *rule = rule_dpif_cast(rule_);
7ee20df1
BP
5462
5463 complete_operation(rule);
abe529af
BP
5464}
5465\f
97d6520b 5466/* Sends 'packet' out 'ofport'.
52a90c29 5467 * May modify 'packet'.
abe529af
BP
5468 * Returns 0 if successful, otherwise a positive errno value. */
5469static int
52a90c29 5470send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
abe529af 5471{
97d6520b 5472 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
b9ad7294 5473 uint64_t odp_actions_stub[1024 / 8];
80e5eed9
BP
5474 struct ofpbuf key, odp_actions;
5475 struct odputil_keybuf keybuf;
9b56fe13 5476 uint32_t odp_port;
80e5eed9 5477 struct flow flow;
abe529af
BP
5478 int error;
5479
72e8bf28 5480 flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
0a740f48
EJ
5481 if (netdev_vport_is_patch(ofport->up.netdev)) {
5482 struct ofproto_dpif *peer_ofproto;
5483 struct dpif_flow_stats stats;
5484 struct ofport_dpif *peer;
5485 struct rule_dpif *rule;
5486
5487 peer = ofport_get_peer(ofport);
5488 if (!peer) {
5489 return ENODEV;
5490 }
5491
5492 dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
b9ad7294
EJ
5493 netdev_vport_inc_tx(ofport->up.netdev, &stats);
5494 netdev_vport_inc_rx(peer->up.netdev, &stats);
0a740f48
EJ
5495
5496 flow.in_port = peer->up.ofp_port;
5497 peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
5498 rule = rule_dpif_lookup(peer_ofproto, &flow);
5499 rule_dpif_execute(rule, &flow, packet);
5500
5501 return 0;
5502 }
5503
b9ad7294
EJ
5504 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
5505
5506 if (ofport->tnl_port) {
5507 struct dpif_flow_stats stats;
5508
5509 odp_port = tnl_port_send(ofport->tnl_port, &flow);
5510 if (odp_port == OVSP_NONE) {
5511 return ENODEV;
5512 }
5513
5514 dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
5515 netdev_vport_inc_tx(ofport->up.netdev, &stats);
5516 odp_put_tunnel_action(&flow.tunnel, &odp_actions);
5517 } else {
5518 odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
5519 flow.vlan_tci);
5520 if (odp_port != ofport->odp_port) {
5521 eth_pop_vlan(packet);
5522 flow.vlan_tci = htons(0);
5523 }
52a90c29
BP
5524 }
5525
80e5eed9 5526 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
5527 odp_flow_key_from_flow(&key, &flow,
5528 ofp_port_to_odp_port(ofproto, flow.in_port));
80e5eed9 5529
6ff686f2
PS
5530 compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
5531
df2c07f4 5532 nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
acf60855 5533 error = dpif_execute(ofproto->backer->dpif,
80e5eed9
BP
5534 key.data, key.size,
5535 odp_actions.data, odp_actions.size,
abe529af
BP
5536 packet);
5537 ofpbuf_uninit(&odp_actions);
5538
5539 if (error) {
5540 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
5541 ofproto->up.name, odp_port, strerror(error));
5542 }
6527c598 5543 ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
abe529af
BP
5544 return error;
5545}
5546\f
df2c07f4 5547/* OpenFlow to datapath action translation. */
abe529af 5548
f25d0cf3
BP
5549static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
5550 struct action_xlate_ctx *);
4cd78906 5551static void xlate_normal(struct action_xlate_ctx *);
abe529af 5552
6a7e895f
BP
5553/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
5554 * The action will state 'slow' as the reason that the action is in the slow
5555 * path. (This is purely informational: it allows a human viewing "ovs-dpctl
5556 * dump-flows" output to see why a flow is in the slow path.)
5557 *
5558 * The 'stub_size' bytes in 'stub' will be used to store the action.
5559 * 'stub_size' must be large enough for the action.
5560 *
5561 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
5562 * respectively. */
5563static void
5564compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
5565 enum slow_path_reason slow,
5566 uint64_t *stub, size_t stub_size,
5567 const struct nlattr **actionsp, size_t *actions_lenp)
5568{
5569 union user_action_cookie cookie;
5570 struct ofpbuf buf;
5571
5572 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
5573 cookie.slow_path.unused = 0;
5574 cookie.slow_path.reason = slow;
5575
5576 ofpbuf_use_stack(&buf, stub, stub_size);
625b0720 5577 if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
9032f11e 5578 uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
625b0720
BP
5579 odp_put_userspace_action(pid, &cookie, &buf);
5580 } else {
5581 put_userspace_action(ofproto, &buf, flow, &cookie);
5582 }
6a7e895f
BP
5583 *actionsp = buf.data;
5584 *actions_lenp = buf.size;
5585}
5586
98403001
BP
5587static size_t
5588put_userspace_action(const struct ofproto_dpif *ofproto,
5589 struct ofpbuf *odp_actions,
5590 const struct flow *flow,
1673e0e4 5591 const union user_action_cookie *cookie)
98403001 5592{
98403001
BP
5593 uint32_t pid;
5594
acf60855 5595 pid = dpif_port_get_pid(ofproto->backer->dpif,
e1b1d06a 5596 ofp_port_to_odp_port(ofproto, flow->in_port));
98403001 5597
39db78a0 5598 return odp_put_userspace_action(pid, cookie, odp_actions);
98403001
BP
5599}
5600
36fc5f18
BP
5601static void
5602compose_sflow_cookie(const struct ofproto_dpif *ofproto,
5603 ovs_be16 vlan_tci, uint32_t odp_port,
1673e0e4 5604 unsigned int n_outputs, union user_action_cookie *cookie)
36fc5f18
BP
5605{
5606 int ifindex;
5607
5608 cookie->type = USER_ACTION_COOKIE_SFLOW;
1673e0e4 5609 cookie->sflow.vlan_tci = vlan_tci;
36fc5f18
BP
5610
5611 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
5612 * port information") for the interpretation of cookie->output. */
5613 switch (n_outputs) {
5614 case 0:
5615 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
1673e0e4 5616 cookie->sflow.output = 0x40000000 | 256;
36fc5f18
BP
5617 break;
5618
5619 case 1:
5620 ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
5621 if (ifindex) {
1673e0e4 5622 cookie->sflow.output = ifindex;
36fc5f18
BP
5623 break;
5624 }
5625 /* Fall through. */
5626 default:
5627 /* 0x80000000 means "multiple output ports. */
1673e0e4 5628 cookie->sflow.output = 0x80000000 | n_outputs;
36fc5f18
BP
5629 break;
5630 }
5631}
5632
6ff686f2
PS
5633/* Compose SAMPLE action for sFlow. */
5634static size_t
5635compose_sflow_action(const struct ofproto_dpif *ofproto,
5636 struct ofpbuf *odp_actions,
5637 const struct flow *flow,
5638 uint32_t odp_port)
5639{
6ff686f2 5640 uint32_t probability;
1673e0e4 5641 union user_action_cookie cookie;
6ff686f2 5642 size_t sample_offset, actions_offset;
36fc5f18 5643 int cookie_offset;
6ff686f2
PS
5644
5645 if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
5646 return 0;
5647 }
5648
6ff686f2
PS
5649 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
5650
5651 /* Number of packets out of UINT_MAX to sample. */
5652 probability = dpif_sflow_get_probability(ofproto->sflow);
5653 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
5654
5655 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
36fc5f18
BP
5656 compose_sflow_cookie(ofproto, htons(0), odp_port,
5657 odp_port == OVSP_NONE ? 0 : 1, &cookie);
98403001 5658 cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
6ff686f2
PS
5659
5660 nl_msg_end_nested(odp_actions, actions_offset);
5661 nl_msg_end_nested(odp_actions, sample_offset);
98403001 5662 return cookie_offset;
6ff686f2
PS
5663}
5664
5665/* SAMPLE action must be first action in any given list of actions.
5666 * At this point we do not have all information required to build it. So try to
5667 * build sample action as complete as possible. */
5668static void
5669add_sflow_action(struct action_xlate_ctx *ctx)
5670{
5671 ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
5672 ctx->odp_actions,
5673 &ctx->flow, OVSP_NONE);
5674 ctx->sflow_odp_port = 0;
5675 ctx->sflow_n_outputs = 0;
5676}
5677
5678/* Fix SAMPLE action according to data collected while composing ODP actions.
5679 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
5680 * USERSPACE action's user-cookie which is required for sflow. */
5681static void
5682fix_sflow_action(struct action_xlate_ctx *ctx)
5683{
5684 const struct flow *base = &ctx->base_flow;
1673e0e4 5685 union user_action_cookie *cookie;
6ff686f2
PS
5686
5687 if (!ctx->user_cookie_offset) {
5688 return;
5689 }
5690
5691 cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
36fc5f18 5692 sizeof(*cookie));
cb22974d 5693 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
6ff686f2 5694
36fc5f18
BP
5695 compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
5696 ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
6ff686f2
PS
5697}
5698
6ff686f2 5699static void
81b1afb1
EJ
5700compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
5701 bool check_stp)
6ff686f2 5702{
d59906fb 5703 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
52a90c29 5704 ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
b9ad7294 5705 ovs_be64 flow_tun_id = ctx->flow.tunnel.tun_id;
8b36f51e 5706 uint8_t flow_nw_tos = ctx->flow.nw_tos;
a4454ac6 5707 struct priority_to_dscp *pdscp;
0a740f48
EJ
5708 uint32_t out_port, odp_port;
5709
5710 /* If 'struct flow' gets additional metadata, we'll need to zero it out
5711 * before traversing a patch port. */
b02475c5 5712 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 19);
d59906fb 5713
a4454ac6
EJ
5714 if (!ofport) {
5715 xlate_report(ctx, "Nonexistent output port");
5716 return;
5717 } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
5718 xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
5719 return;
5720 } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
5721 xlate_report(ctx, "STP not in forwarding state, skipping output");
5722 return;
5723 }
8b36f51e 5724
0a740f48
EJ
5725 if (netdev_vport_is_patch(ofport->up.netdev)) {
5726 struct ofport_dpif *peer = ofport_get_peer(ofport);
5727 struct flow old_flow = ctx->flow;
5728 const struct ofproto_dpif *peer_ofproto;
5729
5730 if (!peer) {
5731 xlate_report(ctx, "Nonexistent patch port peer");
5732 return;
5733 }
5734
5735 peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
5736 if (peer_ofproto->backer != ctx->ofproto->backer) {
5737 xlate_report(ctx, "Patch port peer on a different datapath");
5738 return;
5739 }
5740
5741 ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
5742 ctx->flow.in_port = peer->up.ofp_port;
5743 ctx->flow.metadata = htonll(0);
5744 memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
5745 memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
5746 xlate_table_action(ctx, ctx->flow.in_port, 0, true);
5747 ctx->flow = old_flow;
5748 ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
5749
5750 if (ctx->resubmit_stats) {
b9ad7294
EJ
5751 netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
5752 netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
0a740f48
EJ
5753 }
5754
5755 return;
5756 }
5757
a4454ac6
EJ
5758 pdscp = get_priority(ofport, ctx->flow.skb_priority);
5759 if (pdscp) {
5760 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
5761 ctx->flow.nw_tos |= pdscp->dscp;
d59906fb
EJ
5762 }
5763
0a740f48 5764 odp_port = ofp_port_to_odp_port(ctx->ofproto, ofp_port);
b9ad7294
EJ
5765 if (ofport->tnl_port) {
5766 odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
5767 if (odp_port == OVSP_NONE) {
5768 xlate_report(ctx, "Tunneling decided against output");
5769 return;
5770 }
5771
5772 if (ctx->resubmit_stats) {
5773 netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
5774 }
5775 out_port = odp_port;
5776 commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
5777 ctx->odp_actions);
5778 } else {
5779 out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
5780 ctx->flow.vlan_tci);
5781 if (out_port != odp_port) {
5782 ctx->flow.vlan_tci = htons(0);
5783 }
52a90c29 5784 }
5bbda0aa 5785 commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
52a90c29
BP
5786 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
5787
6ff686f2
PS
5788 ctx->sflow_odp_port = odp_port;
5789 ctx->sflow_n_outputs++;
81b1afb1 5790 ctx->nf_output_iface = ofp_port;
b9ad7294 5791 ctx->flow.tunnel.tun_id = flow_tun_id;
52a90c29 5792 ctx->flow.vlan_tci = flow_vlan_tci;
8b36f51e 5793 ctx->flow.nw_tos = flow_nw_tos;
6ff686f2
PS
5794}
5795
abe529af 5796static void
5e48dc2b 5797compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
abe529af 5798{
81b1afb1 5799 compose_output_action__(ctx, ofp_port, true);
abe529af
BP
5800}
5801
5802static void
29901626 5803xlate_table_action(struct action_xlate_ctx *ctx,
1688c479 5804 uint16_t in_port, uint8_t table_id, bool may_packet_in)
abe529af
BP
5805{
5806 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
54a9cbc9 5807 struct ofproto_dpif *ofproto = ctx->ofproto;
abe529af
BP
5808 struct rule_dpif *rule;
5809 uint16_t old_in_port;
29901626
BP
5810 uint8_t old_table_id;
5811
5812 old_table_id = ctx->table_id;
5813 ctx->table_id = table_id;
abe529af 5814
54a9cbc9 5815 /* Look up a flow with 'in_port' as the input port. */
abe529af
BP
5816 old_in_port = ctx->flow.in_port;
5817 ctx->flow.in_port = in_port;
c57b2226 5818 rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);
54a9cbc9
BP
5819
5820 /* Tag the flow. */
5821 if (table_id > 0 && table_id < N_TABLES) {
5822 struct table_dpif *table = &ofproto->tables[table_id];
5823 if (table->other_table) {
33780682 5824 ctx->tags |= (rule && rule->tag
54a9cbc9
BP
5825 ? rule->tag
5826 : rule_calculate_tag(&ctx->flow,
5cb7a798 5827 &table->other_table->mask,
54a9cbc9
BP
5828 table->basis));
5829 }
5830 }
5831
5832 /* Restore the original input port. Otherwise OFPP_NORMAL and
5833 * OFPP_IN_PORT will have surprising behavior. */
abe529af
BP
5834 ctx->flow.in_port = old_in_port;
5835
5836 if (ctx->resubmit_hook) {
5837 ctx->resubmit_hook(ctx, rule);
5838 }
5839
1688c479 5840 if (rule == NULL && may_packet_in) {
5dca28b5 5841 /* XXX
1688c479
IY
5842 * check if table configuration flags
5843 * OFPTC_TABLE_MISS_CONTROLLER, default.
5844 * OFPTC_TABLE_MISS_CONTINUE,
5845 * OFPTC_TABLE_MISS_DROP
5846 * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
5847 */
5848 rule = rule_dpif_miss_rule(ofproto, &ctx->flow);
5849 }
5850
abe529af 5851 if (rule) {
18b2a258 5852 struct rule_dpif *old_rule = ctx->rule;
54834960 5853
112bc5f4
BP
5854 if (ctx->resubmit_stats) {
5855 rule_credit_stats(rule, ctx->resubmit_stats);
5856 }
5857
abe529af 5858 ctx->recurse++;
18b2a258 5859 ctx->rule = rule;
f25d0cf3 5860 do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
18b2a258 5861 ctx->rule = old_rule;
abe529af
BP
5862 ctx->recurse--;
5863 }
29901626
BP
5864
5865 ctx->table_id = old_table_id;
abe529af
BP
5866 } else {
5867 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
5868
29901626 5869 VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
abe529af 5870 MAX_RESUBMIT_RECURSION);
6a6455e5 5871 ctx->max_resubmit_trigger = true;
abe529af
BP
5872 }
5873}
5874
29901626 5875static void
f25d0cf3
BP
5876xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
5877 const struct ofpact_resubmit *resubmit)
29901626
BP
5878{
5879 uint16_t in_port;
5880 uint8_t table_id;
5881
f25d0cf3
BP
5882 in_port = resubmit->in_port;
5883 if (in_port == OFPP_IN_PORT) {
5884 in_port = ctx->flow.in_port;
5885 }
5886
5887 table_id = resubmit->table_id;
5888 if (table_id == 255) {
5889 table_id = ctx->table_id;
5890 }
29901626 5891
1688c479 5892 xlate_table_action(ctx, in_port, table_id, false);
29901626
BP
5893}
5894
abe529af 5895static void
d59906fb 5896flood_packets(struct action_xlate_ctx *ctx, bool all)
abe529af
BP
5897{
5898 struct ofport_dpif *ofport;
5899
b3e9b2ed 5900 HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
abe529af 5901 uint16_t ofp_port = ofport->up.ofp_port;
d59906fb
EJ
5902
5903 if (ofp_port == ctx->flow.in_port) {
5904 continue;
5905 }
5906
5e48dc2b 5907 if (all) {
81b1afb1 5908 compose_output_action__(ctx, ofp_port, false);
9e1fd49b 5909 } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
5e48dc2b 5910 compose_output_action(ctx, ofp_port);
abe529af
BP
5911 }
5912 }
b3e9b2ed
EJ
5913
5914 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af
BP
5915}
5916
6ff686f2 5917static void
f0fd1a17 5918execute_controller_action(struct action_xlate_ctx *ctx, int len,
a7349929
BP
5919 enum ofp_packet_in_reason reason,
5920 uint16_t controller_id)
6ff686f2 5921{
999fba59
EJ
5922 struct ofputil_packet_in pin;
5923 struct ofpbuf *packet;
6ff686f2 5924
6a7e895f 5925 ctx->slow |= SLOW_CONTROLLER;
999fba59
EJ
5926 if (!ctx->packet) {
5927 return;
5928 }
5929
5930 packet = ofpbuf_clone(ctx->packet);
5931
5932 if (packet->l2 && packet->l3) {
5933 struct eth_header *eh;
b02475c5 5934 uint16_t mpls_depth;
999fba59
EJ
5935
5936 eth_pop_vlan(packet);
5937 eh = packet->l2;
0104aba8 5938
999fba59
EJ
5939 memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
5940 memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
5941
5942 if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
5943 eth_push_vlan(packet, ctx->flow.vlan_tci);
5944 }
5945
b02475c5
SH
5946 mpls_depth = eth_mpls_depth(packet);
5947
5948 if (mpls_depth < ctx->flow.mpls_depth) {
5949 push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
5950 } else if (mpls_depth > ctx->flow.mpls_depth) {
5951 pop_mpls(packet, ctx->flow.dl_type);
5952 } else if (mpls_depth) {
5953 set_mpls_lse(packet, ctx->flow.mpls_lse);
5954 }
5955
999fba59
EJ
5956 if (packet->l4) {
5957 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
5958 packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
5959 ctx->flow.nw_tos, ctx->flow.nw_ttl);
5960 }
5961
5962 if (packet->l7) {
5963 if (ctx->flow.nw_proto == IPPROTO_TCP) {
5964 packet_set_tcp_port(packet, ctx->flow.tp_src,
5965 ctx->flow.tp_dst);
5966 } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
5967 packet_set_udp_port(packet, ctx->flow.tp_src,
5968 ctx->flow.tp_dst);
5969 }
5970 }
5971 }
5972 }
5973
5974 pin.packet = packet->data;
5975 pin.packet_len = packet->size;
f0fd1a17 5976 pin.reason = reason;
a7349929 5977 pin.controller_id = controller_id;
54834960 5978 pin.table_id = ctx->table_id;
18b2a258 5979 pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
54834960 5980
999fba59 5981 pin.send_len = len;
999fba59
EJ
5982 flow_get_metadata(&ctx->flow, &pin.fmd);
5983
d8653c38 5984 connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
999fba59 5985 ofpbuf_delete(packet);
6ff686f2
PS
5986}
5987
b02475c5
SH
5988static void
5989execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
5990{
5991 ovs_assert(eth_type_mpls(eth_type));
5992
5993 if (ctx->base_flow.mpls_depth) {
5994 ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
5995 ctx->flow.mpls_depth++;
5996 } else {
5997 ovs_be32 label;
5998 uint8_t tc, ttl;
5999
6000 if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
6001 label = htonl(0x2); /* IPV6 Explicit Null. */
6002 } else {
6003 label = htonl(0x0); /* IPV4 Explicit Null. */
6004 }
6005 tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
6006 ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
6007 ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
6008 ctx->flow.encap_dl_type = ctx->flow.dl_type;
6009 ctx->flow.mpls_depth = 1;
6010 }
6011 ctx->flow.dl_type = eth_type;
6012}
6013
6014static void
6015execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
6016{
6017 ovs_assert(eth_type_mpls(ctx->flow.dl_type));
6018 ovs_assert(!eth_type_mpls(eth_type));
6019
6020 if (ctx->flow.mpls_depth) {
6021 ctx->flow.mpls_depth--;
6022 ctx->flow.mpls_lse = htonl(0);
6023 if (!ctx->flow.mpls_depth) {
6024 ctx->flow.dl_type = eth_type;
6025 ctx->flow.encap_dl_type = htons(0);
6026 }
6027 }
6028}
6029
f0fd1a17 6030static bool
c2d967a5 6031compose_dec_ttl(struct action_xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
f0fd1a17
PS
6032{
6033 if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
6034 ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
6035 return false;
6036 }
6037
6038 if (ctx->flow.nw_ttl > 1) {
6039 ctx->flow.nw_ttl--;
6040 return false;
6041 } else {
c2d967a5
MM
6042 size_t i;
6043
6044 for (i = 0; i < ids->n_controllers; i++) {
6045 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
6046 ids->cnt_ids[i]);
6047 }
f0fd1a17
PS
6048
6049 /* Stop processing for current table. */
6050 return true;
6051 }
6052}
6053
abe529af 6054static void
f25d0cf3 6055xlate_output_action(struct action_xlate_ctx *ctx,
1688c479 6056 uint16_t port, uint16_t max_len, bool may_packet_in)
abe529af
BP
6057{
6058 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
6059
6060 ctx->nf_output_iface = NF_OUT_DROP;
6061
6062 switch (port) {
6063 case OFPP_IN_PORT:
81b1afb1 6064 compose_output_action(ctx, ctx->flow.in_port);
abe529af
BP
6065 break;
6066 case OFPP_TABLE:
1688c479 6067 xlate_table_action(ctx, ctx->flow.in_port, 0, may_packet_in);
abe529af
BP
6068 break;
6069 case OFPP_NORMAL:
6070 xlate_normal(ctx);
6071 break;
6072 case OFPP_FLOOD:
d59906fb 6073 flood_packets(ctx, false);
abe529af
BP
6074 break;
6075 case OFPP_ALL:
d59906fb 6076 flood_packets(ctx, true);
abe529af
BP
6077 break;
6078 case OFPP_CONTROLLER:
a7349929 6079 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
abe529af 6080 break;
e81d2933
EJ
6081 case OFPP_NONE:
6082 break;
a0fbe94a 6083 case OFPP_LOCAL:
abe529af
BP
6084 default:
6085 if (port != ctx->flow.in_port) {
81b1afb1 6086 compose_output_action(ctx, port);
3dd3eace
BP
6087 } else {
6088 xlate_report(ctx, "skipping output to input port");
abe529af
BP
6089 }
6090 break;
6091 }
6092
6093 if (prev_nf_output_iface == NF_OUT_FLOOD) {
6094 ctx->nf_output_iface = NF_OUT_FLOOD;
6095 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
6096 ctx->nf_output_iface = prev_nf_output_iface;
6097 } else if (prev_nf_output_iface != NF_OUT_DROP &&
6098 ctx->nf_output_iface != NF_OUT_FLOOD) {
6099 ctx->nf_output_iface = NF_OUT_MULTI;
6100 }
6101}
6102
f694937d
EJ
6103static void
6104xlate_output_reg_action(struct action_xlate_ctx *ctx,
f25d0cf3 6105 const struct ofpact_output_reg *or)
f694937d 6106{
f25d0cf3
BP
6107 uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
6108 if (port <= UINT16_MAX) {
1688c479 6109 xlate_output_action(ctx, port, or->max_len, false);
f694937d
EJ
6110 }
6111}
6112
abe529af
BP
6113static void
6114xlate_enqueue_action(struct action_xlate_ctx *ctx,
f25d0cf3 6115 const struct ofpact_enqueue *enqueue)
abe529af 6116{
f25d0cf3
BP
6117 uint16_t ofp_port = enqueue->port;
6118 uint32_t queue_id = enqueue->queue;
abff858b 6119 uint32_t flow_priority, priority;
abe529af
BP
6120 int error;
6121
f25d0cf3 6122 /* Translate queue to priority. */
acf60855
JP
6123 error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
6124 queue_id, &priority);
abe529af
BP
6125 if (error) {
6126 /* Fall back to ordinary output action. */
1688c479 6127 xlate_output_action(ctx, enqueue->port, 0, false);
abe529af
BP
6128 return;
6129 }
6130
f25d0cf3 6131 /* Check output port. */
abe529af
BP
6132 if (ofp_port == OFPP_IN_PORT) {
6133 ofp_port = ctx->flow.in_port;
8ba855c1
BP
6134 } else if (ofp_port == ctx->flow.in_port) {
6135 return;
abe529af 6136 }
abe529af 6137
df2c07f4 6138 /* Add datapath actions. */
deedf7e7
BP
6139 flow_priority = ctx->flow.skb_priority;
6140 ctx->flow.skb_priority = priority;
81b1afb1 6141 compose_output_action(ctx, ofp_port);
deedf7e7 6142 ctx->flow.skb_priority = flow_priority;
abe529af
BP
6143
6144 /* Update NetFlow output port. */
6145 if (ctx->nf_output_iface == NF_OUT_DROP) {
4b23aebf 6146 ctx->nf_output_iface = ofp_port;
abe529af
BP
6147 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
6148 ctx->nf_output_iface = NF_OUT_MULTI;
6149 }
6150}
6151
6152static void
f25d0cf3 6153xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
abe529af 6154{
f25d0cf3 6155 uint32_t skb_priority;
abe529af 6156
acf60855
JP
6157 if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
6158 queue_id, &skb_priority)) {
f25d0cf3
BP
6159 ctx->flow.skb_priority = skb_priority;
6160 } else {
6161 /* Couldn't translate queue to a priority. Nothing to do. A warning
abe529af 6162 * has already been logged. */
abe529af 6163 }
abe529af
BP
6164}
6165
6166struct xlate_reg_state {
6167 ovs_be16 vlan_tci;
6168 ovs_be64 tun_id;
6169};
6170
daff3353
EJ
6171static bool
6172slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
6173{
6174 struct ofproto_dpif *ofproto = ofproto_;
6175 struct ofport_dpif *port;
6176
6177 switch (ofp_port) {
6178 case OFPP_IN_PORT:
6179 case OFPP_TABLE:
6180 case OFPP_NORMAL:
6181 case OFPP_FLOOD:
6182 case OFPP_ALL:
439e4d8c 6183 case OFPP_NONE:
daff3353
EJ
6184 return true;
6185 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
6186 return false;
6187 default:
6188 port = get_ofp_port(ofproto, ofp_port);
6189 return port ? port->may_enable : false;
6190 }
6191}
6192
f25d0cf3
BP
6193static void
6194xlate_bundle_action(struct action_xlate_ctx *ctx,
6195 const struct ofpact_bundle *bundle)
6196{
6197 uint16_t port;
6198
6199 port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
6200 if (bundle->dst.field) {
6201 nxm_reg_load(&bundle->dst, port, &ctx->flow);
6202 } else {
1688c479 6203 xlate_output_action(ctx, port, 0, false);
f25d0cf3
BP
6204 }
6205}
6206
75a75043
BP
6207static void
6208xlate_learn_action(struct action_xlate_ctx *ctx,
f25d0cf3 6209 const struct ofpact_learn *learn)
75a75043
BP
6210{
6211 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
6212 struct ofputil_flow_mod fm;
f25d0cf3
BP
6213 uint64_t ofpacts_stub[1024 / 8];
6214 struct ofpbuf ofpacts;
75a75043
BP
6215 int error;
6216
f25d0cf3
BP
6217 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
6218 learn_execute(learn, &ctx->flow, &fm, &ofpacts);
75a75043
BP
6219
6220 error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
6221 if (error && !VLOG_DROP_WARN(&rl)) {
90bf1e07
BP
6222 VLOG_WARN("learning action failed to modify flow table (%s)",
6223 ofperr_get_name(error));
75a75043
BP
6224 }
6225
f25d0cf3 6226 ofpbuf_uninit(&ofpacts);
75a75043
BP
6227}
6228
0e553d9c
BP
6229/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
6230 * means "infinite". */
6231static void
6232reduce_timeout(uint16_t max, uint16_t *timeout)
6233{
6234 if (max && (!*timeout || *timeout > max)) {
6235 *timeout = max;
6236 }
6237}
6238
6239static void
6240xlate_fin_timeout(struct action_xlate_ctx *ctx,
f25d0cf3 6241 const struct ofpact_fin_timeout *oft)
0e553d9c
BP
6242{
6243 if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
6244 struct rule_dpif *rule = ctx->rule;
6245
f25d0cf3
BP
6246 reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
6247 reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
0e553d9c
BP
6248 }
6249}
6250
21f7563c
JP
6251static bool
6252may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
6253{
9e1fd49b
BP
6254 if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
6255 ? OFPUTIL_PC_NO_RECV_STP
6256 : OFPUTIL_PC_NO_RECV)) {
21f7563c
JP
6257 return false;
6258 }
6259
6260 /* Only drop packets here if both forwarding and learning are
6261 * disabled. If just learning is enabled, we need to have
6262 * OFPP_NORMAL and the learning action have a look at the packet
6263 * before we can drop it. */
6264 if (!stp_forward_in_state(port->stp_state)
6265 && !stp_learn_in_state(port->stp_state)) {
6266 return false;
6267 }
6268
6269 return true;
6270}
6271
abe529af 6272static void
f25d0cf3 6273do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
abe529af
BP
6274 struct action_xlate_ctx *ctx)
6275{
6276 const struct ofport_dpif *port;
254750ce 6277 bool was_evictable = true;
f25d0cf3 6278 const struct ofpact *a;
abe529af
BP
6279
6280 port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
21f7563c 6281 if (port && !may_receive(port, ctx)) {
abe529af
BP
6282 /* Drop this flow. */
6283 return;
6284 }
6285
254750ce
BP
6286 if (ctx->rule) {
6287 /* Don't let the rule we're working on get evicted underneath us. */
6288 was_evictable = ctx->rule->up.evictable;
6289 ctx->rule->up.evictable = false;
6290 }
f25d0cf3
BP
6291 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6292 struct ofpact_controller *controller;
4cceacb9 6293 const struct ofpact_metadata *metadata;
38f2e360 6294
848e8809
EJ
6295 if (ctx->exit) {
6296 break;
6297 }
6298
f25d0cf3
BP
6299 switch (a->type) {
6300 case OFPACT_OUTPUT:
6301 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
1688c479 6302 ofpact_get_OUTPUT(a)->max_len, true);
f25d0cf3
BP
6303 break;
6304
6305 case OFPACT_CONTROLLER:
6306 controller = ofpact_get_CONTROLLER(a);
6307 execute_controller_action(ctx, controller->max_len,
6308 controller->reason,
6309 controller->controller_id);
6310 break;
690a61c5 6311
f25d0cf3
BP
6312 case OFPACT_ENQUEUE:
6313 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
abe529af
BP
6314 break;
6315
f25d0cf3 6316 case OFPACT_SET_VLAN_VID:
abe529af 6317 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
f25d0cf3
BP
6318 ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
6319 | htons(VLAN_CFI));
abe529af
BP
6320 break;
6321
f25d0cf3 6322 case OFPACT_SET_VLAN_PCP:
abe529af 6323 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
f25d0cf3
BP
6324 ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6325 << VLAN_PCP_SHIFT)
6326 | VLAN_CFI);
abe529af
BP
6327 break;
6328
f25d0cf3 6329 case OFPACT_STRIP_VLAN:
abe529af 6330 ctx->flow.vlan_tci = htons(0);
abe529af
BP
6331 break;
6332
3e34fbdd 6333 case OFPACT_PUSH_VLAN:
5dca28b5 6334 /* XXX 802.1AD(QinQ) */
3e34fbdd
IY
6335 ctx->flow.vlan_tci = htons(VLAN_CFI);
6336 break;
6337
f25d0cf3
BP
6338 case OFPACT_SET_ETH_SRC:
6339 memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
6340 ETH_ADDR_LEN);
abe529af
BP
6341 break;
6342
f25d0cf3
BP
6343 case OFPACT_SET_ETH_DST:
6344 memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
6345 ETH_ADDR_LEN);
abe529af
BP
6346 break;
6347
f25d0cf3
BP
6348 case OFPACT_SET_IPV4_SRC:
6349 ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
abe529af
BP
6350 break;
6351
f25d0cf3
BP
6352 case OFPACT_SET_IPV4_DST:
6353 ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
abe529af
BP
6354 break;
6355
f25d0cf3 6356 case OFPACT_SET_IPV4_DSCP:
c4f2731d
PS
6357 /* OpenFlow 1.0 only supports IPv4. */
6358 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
6359 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
f25d0cf3 6360 ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
c4f2731d 6361 }
abe529af
BP
6362 break;
6363
f25d0cf3
BP
6364 case OFPACT_SET_L4_SRC_PORT:
6365 ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
abe529af
BP
6366 break;
6367
f25d0cf3
BP
6368 case OFPACT_SET_L4_DST_PORT:
6369 ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
abe529af
BP
6370 break;
6371
f25d0cf3
BP
6372 case OFPACT_RESUBMIT:
6373 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
38f2e360
BP
6374 break;
6375
f25d0cf3 6376 case OFPACT_SET_TUNNEL:
296e07ac 6377 ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
29901626
BP
6378 break;
6379
f25d0cf3
BP
6380 case OFPACT_SET_QUEUE:
6381 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
abe529af
BP
6382 break;
6383
f25d0cf3 6384 case OFPACT_POP_QUEUE:
deedf7e7 6385 ctx->flow.skb_priority = ctx->orig_skb_priority;
38f2e360
BP
6386 break;
6387
f25d0cf3
BP
6388 case OFPACT_REG_MOVE:
6389 nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
38f2e360
BP
6390 break;
6391
f25d0cf3
BP
6392 case OFPACT_REG_LOAD:
6393 nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
38f2e360
BP
6394 break;
6395
b02475c5
SH
6396 case OFPACT_PUSH_MPLS:
6397 execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
6398 break;
6399
6400 case OFPACT_POP_MPLS:
6401 execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6402 break;
6403
f25d0cf3 6404 case OFPACT_DEC_TTL:
c2d967a5 6405 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
f25d0cf3
BP
6406 goto out;
6407 }
38f2e360
BP
6408 break;
6409
f25d0cf3
BP
6410 case OFPACT_NOTE:
6411 /* Nothing to do. */
abe529af
BP
6412 break;
6413
f25d0cf3
BP
6414 case OFPACT_MULTIPATH:
6415 multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
abe529af 6416 break;
daff3353 6417
f25d0cf3 6418 case OFPACT_BUNDLE:
a368bb53 6419 ctx->ofproto->has_bundle_action = true;
f25d0cf3 6420 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
a368bb53 6421 break;
f694937d 6422
f25d0cf3
BP
6423 case OFPACT_OUTPUT_REG:
6424 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
f694937d 6425 break;
75a75043 6426
f25d0cf3 6427 case OFPACT_LEARN:
75a75043 6428 ctx->has_learn = true;
3de9590b 6429 if (ctx->may_learn) {
f25d0cf3 6430 xlate_learn_action(ctx, ofpact_get_LEARN(a));
75a75043
BP
6431 }
6432 break;
848e8809 6433
f25d0cf3 6434 case OFPACT_EXIT:
848e8809
EJ
6435 ctx->exit = true;
6436 break;
0e553d9c 6437
f25d0cf3 6438 case OFPACT_FIN_TIMEOUT:
0e553d9c 6439 ctx->has_fin_timeout = true;
f25d0cf3 6440 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
a7349929 6441 break;
8dd54666 6442
b19e8793 6443 case OFPACT_CLEAR_ACTIONS:
5dca28b5 6444 /* XXX
b19e8793
IY
6445 * Nothing to do because writa-actions is not supported for now.
6446 * When writa-actions is supported, clear-actions also must
6447 * be supported at the same time.
6448 */
6449 break;
6450
4cceacb9
JS
6451 case OFPACT_WRITE_METADATA:
6452 metadata = ofpact_get_WRITE_METADATA(a);
6453 ctx->flow.metadata &= ~metadata->mask;
6454 ctx->flow.metadata |= metadata->metadata & metadata->mask;
6455 break;
6456
8dd54666 6457 case OFPACT_GOTO_TABLE: {
5dca28b5 6458 /* XXX remove recursion */
8dd54666
IY
6459 /* It is assumed that goto-table is last action */
6460 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
cb22974d 6461 ovs_assert(ctx->table_id < ogt->table_id);
8dd54666
IY
6462 xlate_table_action(ctx, ctx->flow.in_port, ogt->table_id, true);
6463 break;
6464 }
abe529af
BP
6465 }
6466 }
21f7563c 6467
f0fd1a17 6468out:
21f7563c
JP
6469 /* We've let OFPP_NORMAL and the learning action look at the packet,
6470 * so drop it now if forwarding is disabled. */
6471 if (port && !stp_forward_in_state(port->stp_state)) {
6472 ofpbuf_clear(ctx->odp_actions);
6473 add_sflow_action(ctx);
6474 }
254750ce
BP
6475 if (ctx->rule) {
6476 ctx->rule->up.evictable = was_evictable;
6477 }
abe529af
BP
6478}
6479
6480static void
6481action_xlate_ctx_init(struct action_xlate_ctx *ctx,
6482 struct ofproto_dpif *ofproto, const struct flow *flow,
18b2a258 6483 ovs_be16 initial_tci, struct rule_dpif *rule,
0e553d9c 6484 uint8_t tcp_flags, const struct ofpbuf *packet)
abe529af 6485{
ef506a7c
JG
6486 ovs_be64 initial_tun_id = flow->tunnel.tun_id;
6487
6488 /* Flow initialization rules:
6489 * - 'base_flow' must match the kernel's view of the packet at the
6490 * time that action processing starts. 'flow' represents any
6491 * transformations we wish to make through actions.
6492 * - By default 'base_flow' and 'flow' are the same since the input
6493 * packet matches the output before any actions are applied.
6494 * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
6495 * of the received packet as seen by the kernel. If we later output
6496 * to another device without any modifications this will cause us to
6497 * insert a new tag since the original one was stripped off by the
6498 * VLAN device.
6499 * - Tunnel 'flow' is largely cleared when transitioning between
6500 * the input and output stages since it does not make sense to output
6501 * a packet with the exact headers that it was received with (i.e.
6502 * the destination IP is us). The one exception is the tun_id, which
6503 * is preserved to allow use in later resubmit lookups and loads into
6504 * registers.
6505 * - Tunnel 'base_flow' is completely cleared since that is what the
6506 * kernel does. If we wish to maintain the original values an action
6507 * needs to be generated. */
6508
abe529af
BP
6509 ctx->ofproto = ofproto;
6510 ctx->flow = *flow;
47d4a9db 6511 memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
e84173dc 6512 ctx->base_flow = ctx->flow;
e84173dc 6513 ctx->base_flow.vlan_tci = initial_tci;
ef506a7c 6514 ctx->flow.tunnel.tun_id = initial_tun_id;
18b2a258 6515 ctx->rule = rule;
abe529af 6516 ctx->packet = packet;
3de9590b 6517 ctx->may_learn = packet != NULL;
0e553d9c 6518 ctx->tcp_flags = tcp_flags;
abe529af 6519 ctx->resubmit_hook = NULL;
479df176 6520 ctx->report_hook = NULL;
112bc5f4 6521 ctx->resubmit_stats = NULL;
abe529af
BP
6522}
6523
f25d0cf3
BP
6524/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
6525 * into datapath actions in 'odp_actions', using 'ctx'. */
050ac423 6526static void
abe529af 6527xlate_actions(struct action_xlate_ctx *ctx,
f25d0cf3 6528 const struct ofpact *ofpacts, size_t ofpacts_len,
050ac423 6529 struct ofpbuf *odp_actions)
abe529af 6530{
43d50bc8
BP
6531 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
6532 * that in the future we always keep a copy of the original flow for
6533 * tracing purposes. */
6534 static bool hit_resubmit_limit;
6535
6a7e895f
BP
6536 enum slow_path_reason special;
6537
abe529af
BP
6538 COVERAGE_INC(ofproto_dpif_xlate);
6539
050ac423
BP
6540 ofpbuf_clear(odp_actions);
6541 ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
6542
6543 ctx->odp_actions = odp_actions;
97e42c92 6544 ctx->tags = 0;
6a7e895f 6545 ctx->slow = 0;
97e42c92
BP
6546 ctx->has_learn = false;
6547 ctx->has_normal = false;
0e553d9c 6548 ctx->has_fin_timeout = false;
97e42c92 6549 ctx->nf_output_iface = NF_OUT_DROP;
9d24de3b 6550 ctx->mirrors = 0;
97e42c92 6551 ctx->recurse = 0;
6a6455e5 6552 ctx->max_resubmit_trigger = false;
deedf7e7 6553 ctx->orig_skb_priority = ctx->flow.skb_priority;
97e42c92 6554 ctx->table_id = 0;
848e8809 6555 ctx->exit = false;
7257b535 6556
43d50bc8 6557 if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
ccb7c863
BP
6558 /* Do this conditionally because the copy is expensive enough that it
6559 * shows up in profiles.
6560 *
6561 * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
6562 * believe that I wasn't using it without initializing it if I kept it
6563 * in a local variable. */
6564 ctx->orig_flow = ctx->flow;
6565 }
6566
eadef313 6567 if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
7257b535
BP
6568 switch (ctx->ofproto->up.frag_handling) {
6569 case OFPC_FRAG_NORMAL:
6570 /* We must pretend that transport ports are unavailable. */
97e42c92
BP
6571 ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
6572 ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
7257b535
BP
6573 break;
6574
6575 case OFPC_FRAG_DROP:
050ac423 6576 return;
7257b535
BP
6577
6578 case OFPC_FRAG_REASM:
6579 NOT_REACHED();
6580
6581 case OFPC_FRAG_NX_MATCH:
6582 /* Nothing to do. */
6583 break;
f0fd1a17
PS
6584
6585 case OFPC_INVALID_TTL_TO_CONTROLLER:
6586 NOT_REACHED();
7257b535
BP
6587 }
6588 }
6589
6a7e895f
BP
6590 special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
6591 if (special) {
6592 ctx->slow |= special;
abe529af 6593 } else {
6a6455e5 6594 static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
6a6455e5 6595 ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
ee382d89 6596 uint32_t local_odp_port;
6a6455e5 6597
6ff686f2 6598 add_sflow_action(ctx);
f25d0cf3 6599 do_xlate_actions(ofpacts, ofpacts_len, ctx);
abe529af 6600
43d50bc8
BP
6601 if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
6602 if (!hit_resubmit_limit) {
6603 /* We didn't record the original flow. Make sure we do from
6604 * now on. */
6605 hit_resubmit_limit = true;
6606 } else if (!VLOG_DROP_ERR(&trace_rl)) {
6607 struct ds ds = DS_EMPTY_INITIALIZER;
6608
6609 ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
6610 initial_tci, &ds);
6611 VLOG_ERR("Trace triggered by excessive resubmit "
6612 "recursion:\n%s", ds_cstr(&ds));
6613 ds_destroy(&ds);
6614 }
6a6455e5
EJ
6615 }
6616
ee382d89 6617 local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
b6848f13 6618 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
ee382d89 6619 local_odp_port,
b6848f13
BP
6620 ctx->odp_actions->data,
6621 ctx->odp_actions->size)) {
6a7e895f 6622 ctx->slow |= SLOW_IN_BAND;
b6848f13
BP
6623 if (ctx->packet
6624 && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
6625 ctx->packet)) {
5e48dc2b 6626 compose_output_action(ctx, OFPP_LOCAL);
b6848f13
BP
6627 }
6628 }
ccb7c863
BP
6629 if (ctx->ofproto->has_mirrors) {
6630 add_mirror_actions(ctx, &ctx->orig_flow);
6631 }
a7c4eaf6 6632 fix_sflow_action(ctx);
abe529af 6633 }
050ac423
BP
6634}
6635
f25d0cf3
BP
6636/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
6637 * into datapath actions, using 'ctx', and discards the datapath actions. */
050ac423
BP
6638static void
6639xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
f25d0cf3
BP
6640 const struct ofpact *ofpacts,
6641 size_t ofpacts_len)
050ac423
BP
6642{
6643 uint64_t odp_actions_stub[1024 / 8];
6644 struct ofpbuf odp_actions;
abe529af 6645
050ac423 6646 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
f25d0cf3 6647 xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
050ac423 6648 ofpbuf_uninit(&odp_actions);
abe529af 6649}
479df176
BP
6650
6651static void
6652xlate_report(struct action_xlate_ctx *ctx, const char *s)
6653{
6654 if (ctx->report_hook) {
6655 ctx->report_hook(ctx, s);
6656 }
6657}
abe529af
BP
6658\f
6659/* OFPP_NORMAL implementation. */
6660
abe529af
BP
6661static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
6662
ecac4ebf
BP
6663/* Given 'vid', the VID obtained from the 802.1Q header that was received as
6664 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
6665 * the bundle on which the packet was received, returns the VLAN to which the
6666 * packet belongs.
6667 *
6668 * Both 'vid' and the return value are in the range 0...4095. */
6669static uint16_t
6670input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
6671{
6672 switch (in_bundle->vlan_mode) {
6673 case PORT_VLAN_ACCESS:
6674 return in_bundle->vlan;
6675 break;
6676
6677 case PORT_VLAN_TRUNK:
6678 return vid;
6679
6680 case PORT_VLAN_NATIVE_UNTAGGED:
6681 case PORT_VLAN_NATIVE_TAGGED:
6682 return vid ? vid : in_bundle->vlan;
6683
6684 default:
6685 NOT_REACHED();
6686 }
6687}
6688
5da5ec37
BP
6689/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
6690 * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs
6691 * a warning.
6692 *
6693 * 'vid' should be the VID obtained from the 802.1Q header that was received as
6694 * part of a packet (specify 0 if there was no 802.1Q header), in the range
6695 * 0...4095. */
6696static bool
6697input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
6698{
33158a18
JP
6699 /* Allow any VID on the OFPP_NONE port. */
6700 if (in_bundle == &ofpp_none_bundle) {
6701 return true;
6702 }
6703
5da5ec37
BP
6704 switch (in_bundle->vlan_mode) {
6705 case PORT_VLAN_ACCESS:
6706 if (vid) {
6707 if (warn) {
6708 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6709 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
6710 "packet received on port %s configured as VLAN "
6711 "%"PRIu16" access port",
6712 in_bundle->ofproto->up.name, vid,
6713 in_bundle->name, in_bundle->vlan);
6714 }
6715 return false;
6716 }
6717 return true;
6718
6719 case PORT_VLAN_NATIVE_UNTAGGED:
6720 case PORT_VLAN_NATIVE_TAGGED:
6721 if (!vid) {
6722 /* Port must always carry its native VLAN. */
6723 return true;
6724 }
6725 /* Fall through. */
6726 case PORT_VLAN_TRUNK:
6727 if (!ofbundle_includes_vlan(in_bundle, vid)) {
6728 if (warn) {
6729 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6730 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
6731 "received on port %s not configured for trunking "
6732 "VLAN %"PRIu16,
6733 in_bundle->ofproto->up.name, vid,
6734 in_bundle->name, vid);
6735 }
6736 return false;
6737 }
6738 return true;
6739
6740 default:
6741 NOT_REACHED();
6742 }
6743
6744}
6745
ecac4ebf
BP
6746/* Given 'vlan', the VLAN that a packet belongs to, and
6747 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
6748 * that should be included in the 802.1Q header. (If the return value is 0,
6749 * then the 802.1Q header should only be included in the packet if there is a
6750 * nonzero PCP.)
6751 *
6752 * Both 'vlan' and the return value are in the range 0...4095. */
6753static uint16_t
6754output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
6755{
6756 switch (out_bundle->vlan_mode) {
6757 case PORT_VLAN_ACCESS:
6758 return 0;
6759
6760 case PORT_VLAN_TRUNK:
6761 case PORT_VLAN_NATIVE_TAGGED:
6762 return vlan;
6763
6764 case PORT_VLAN_NATIVE_UNTAGGED:
6765 return vlan == out_bundle->vlan ? 0 : vlan;
6766
6767 default:
6768 NOT_REACHED();
6769 }
6770}
6771
395e68ce
BP
6772static void
6773output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
6774 uint16_t vlan)
abe529af 6775{
395e68ce
BP
6776 struct ofport_dpif *port;
6777 uint16_t vid;
81b1afb1 6778 ovs_be16 tci, old_tci;
ecac4ebf 6779
395e68ce
BP
6780 vid = output_vlan_to_vid(out_bundle, vlan);
6781 if (!out_bundle->bond) {
6782 port = ofbundle_get_a_port(out_bundle);
6783 } else {
6784 port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
6785 vid, &ctx->tags);
6786 if (!port) {
6787 /* No slaves enabled, so drop packet. */
6788 return;
6789 }
6790 }
abe529af 6791
81b1afb1 6792 old_tci = ctx->flow.vlan_tci;
5e9ceccd
BP
6793 tci = htons(vid);
6794 if (tci || out_bundle->use_priority_tags) {
6795 tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
6796 if (tci) {
6797 tci |= htons(VLAN_CFI);
6798 }
395e68ce 6799 }
81b1afb1 6800 ctx->flow.vlan_tci = tci;
395e68ce 6801
5e48dc2b 6802 compose_output_action(ctx, port->up.ofp_port);
81b1afb1 6803 ctx->flow.vlan_tci = old_tci;
abe529af
BP
6804}
6805
6806static int
6807mirror_mask_ffs(mirror_mask_t mask)
6808{
6809 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
6810 return ffs(mask);
6811}
6812
abe529af
BP
6813static bool
6814ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
6815{
ecac4ebf 6816 return (bundle->vlan_mode != PORT_VLAN_ACCESS
fc3d7408 6817 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
abe529af
BP
6818}
6819
6820static bool
6821ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
6822{
6823 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
6824}
6825
6826/* Returns an arbitrary interface within 'bundle'. */
6827static struct ofport_dpif *
6828ofbundle_get_a_port(const struct ofbundle *bundle)
6829{
6830 return CONTAINER_OF(list_front(&bundle->ports),
6831 struct ofport_dpif, bundle_node);
6832}
6833
abe529af
BP
6834static bool
6835vlan_is_mirrored(const struct ofmirror *m, int vlan)
6836{
fc3d7408 6837 return !m->vlans || bitmap_is_set(m->vlans, vlan);
abe529af
BP
6838}
6839
6840static void
c06bba01 6841add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
abe529af
BP
6842{
6843 struct ofproto_dpif *ofproto = ctx->ofproto;
6844 mirror_mask_t mirrors;
c06bba01
JP
6845 struct ofbundle *in_bundle;
6846 uint16_t vlan;
6847 uint16_t vid;
6848 const struct nlattr *a;
6849 size_t left;
6850
3581c12c 6851 in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
70c2fd56 6852 ctx->packet != NULL, NULL);
3581c12c 6853 if (!in_bundle) {
c06bba01
JP
6854 return;
6855 }
c06bba01
JP
6856 mirrors = in_bundle->src_mirrors;
6857
6858 /* Drop frames on bundles reserved for mirroring. */
6859 if (in_bundle->mirror_out) {
6860 if (ctx->packet != NULL) {
6861 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6862 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
6863 "%s, which is reserved exclusively for mirroring",
6864 ctx->ofproto->up.name, in_bundle->name);
6865 }
6866 return;
6867 }
6868
6869 /* Check VLAN. */
6870 vid = vlan_tci_to_vid(orig_flow->vlan_tci);
6871 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
6872 return;
6873 }
6874 vlan = input_vid_to_vlan(in_bundle, vid);
6875
6876 /* Look at the output ports to check for destination selections. */
6877
6878 NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
6879 ctx->odp_actions->size) {
6880 enum ovs_action_attr type = nl_attr_type(a);
6881 struct ofport_dpif *ofport;
6882
6883 if (type != OVS_ACTION_ATTR_OUTPUT) {
6884 continue;
6885 }
6886
6887 ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
521472bc
BP
6888 if (ofport && ofport->bundle) {
6889 mirrors |= ofport->bundle->dst_mirrors;
6890 }
c06bba01 6891 }
abe529af
BP
6892
6893 if (!mirrors) {
6894 return;
6895 }
6896
c06bba01
JP
6897 /* Restore the original packet before adding the mirror actions. */
6898 ctx->flow = *orig_flow;
6899
9ba15e2a
BP
6900 while (mirrors) {
6901 struct ofmirror *m;
9ba15e2a
BP
6902
6903 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
6904
6905 if (!vlan_is_mirrored(m, vlan)) {
8472a3ce 6906 mirrors = zero_rightmost_1bit(mirrors);
9ba15e2a
BP
6907 continue;
6908 }
6909
6910 mirrors &= ~m->dup_mirrors;
9d24de3b 6911 ctx->mirrors |= m->dup_mirrors;
9ba15e2a 6912 if (m->out) {
395e68ce 6913 output_normal(ctx, m->out, vlan);
614ec445
EJ
6914 } else if (vlan != m->out_vlan
6915 && !eth_addr_is_reserved(orig_flow->dl_dst)) {
9ba15e2a
BP
6916 struct ofbundle *bundle;
6917
6918 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
6919 if (ofbundle_includes_vlan(bundle, m->out_vlan)
395e68ce
BP
6920 && !bundle->mirror_out) {
6921 output_normal(ctx, bundle, m->out_vlan);
abe529af
BP
6922 }
6923 }
6924 }
abe529af
BP
6925 }
6926}
6927
9d24de3b
JP
6928static void
6929update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
6930 uint64_t packets, uint64_t bytes)
6931{
6932 if (!mirrors) {
6933 return;
6934 }
6935
8472a3ce 6936 for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
9d24de3b
JP
6937 struct ofmirror *m;
6938
6939 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
6940
6941 if (!m) {
6942 /* In normal circumstances 'm' will not be NULL. However,
6943 * if mirrors are reconfigured, we can temporarily get out
6944 * of sync in facet_revalidate(). We could "correct" the
6945 * mirror list before reaching here, but doing that would
6946 * not properly account the traffic stats we've currently
6947 * accumulated for previous mirror configuration. */
6948 continue;
6949 }
6950
6951 m->packet_count += packets;
6952 m->byte_count += bytes;
6953 }
6954}
6955
abe529af
BP
6956/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
6957 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
6958 * indicate this; newer upstream kernels use gratuitous ARP requests. */
6959static bool
6960is_gratuitous_arp(const struct flow *flow)
6961{
6962 return (flow->dl_type == htons(ETH_TYPE_ARP)
6963 && eth_addr_is_broadcast(flow->dl_dst)
6964 && (flow->nw_proto == ARP_OP_REPLY
6965 || (flow->nw_proto == ARP_OP_REQUEST
6966 && flow->nw_src == flow->nw_dst)));
6967}
6968
6969static void
6970update_learning_table(struct ofproto_dpif *ofproto,
6971 const struct flow *flow, int vlan,
6972 struct ofbundle *in_bundle)
6973{
6974 struct mac_entry *mac;
6975
33158a18
JP
6976 /* Don't learn the OFPP_NONE port. */
6977 if (in_bundle == &ofpp_none_bundle) {
6978 return;
6979 }
6980
abe529af
BP
6981 if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
6982 return;
6983 }
6984
6985 mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
6986 if (is_gratuitous_arp(flow)) {
6987 /* We don't want to learn from gratuitous ARP packets that are
6988 * reflected back over bond slaves so we lock the learning table. */
6989 if (!in_bundle->bond) {
6990 mac_entry_set_grat_arp_lock(mac);
6991 } else if (mac_entry_is_grat_arp_locked(mac)) {
6992 return;
6993 }
6994 }
6995
6996 if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
6997 /* The log messages here could actually be useful in debugging,
6998 * so keep the rate limit relatively high. */
6999 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
7000 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
7001 "on port %s in VLAN %d",
7002 ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
7003 in_bundle->name, vlan);
7004
7005 mac->port.p = in_bundle;
2cc3c58e 7006 tag_set_add(&ofproto->backer->revalidate_set,
abe529af
BP
7007 mac_learning_changed(ofproto->ml, mac));
7008 }
7009}
7010
3581c12c 7011static struct ofbundle *
4acbc98d
SH
7012lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
7013 bool warn, struct ofport_dpif **in_ofportp)
395e68ce
BP
7014{
7015 struct ofport_dpif *ofport;
7016
7017 /* Find the port and bundle for the received packet. */
7018 ofport = get_ofp_port(ofproto, in_port);
70c2fd56
BP
7019 if (in_ofportp) {
7020 *in_ofportp = ofport;
7021 }
395e68ce 7022 if (ofport && ofport->bundle) {
3581c12c 7023 return ofport->bundle;
395e68ce
BP
7024 }
7025
70c2fd56
BP
7026 /* Special-case OFPP_NONE, which a controller may use as the ingress
7027 * port for traffic that it is sourcing. */
7028 if (in_port == OFPP_NONE) {
7029 return &ofpp_none_bundle;
7030 }
7031
395e68ce
BP
7032 /* Odd. A few possible reasons here:
7033 *
7034 * - We deleted a port but there are still a few packets queued up
7035 * from it.
7036 *
7037 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
7038 * we don't know about.
7039 *
7040 * - The ofproto client didn't configure the port as part of a bundle.
6b803ddc
EJ
7041 * This is particularly likely to happen if a packet was received on the
7042 * port after it was created, but before the client had a chance to
7043 * configure its bundle.
395e68ce
BP
7044 */
7045 if (warn) {
7046 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7047
7048 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
7049 "port %"PRIu16, ofproto->up.name, in_port);
7050 }
7051 return NULL;
7052}
7053
5da5ec37 7054/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
abe529af
BP
7055 * dropped. Returns true if they may be forwarded, false if they should be
7056 * dropped.
7057 *
395e68ce
BP
7058 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
7059 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
abe529af 7060 *
395e68ce
BP
7061 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
7062 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
7063 * checked by input_vid_is_valid().
abe529af
BP
7064 *
7065 * May also add tags to '*tags', although the current implementation only does
7066 * so in one special case.
7067 */
7068static bool
479df176
BP
7069is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
7070 uint16_t vlan)
abe529af 7071{
479df176
BP
7072 struct ofproto_dpif *ofproto = ctx->ofproto;
7073 struct flow *flow = &ctx->flow;
395e68ce 7074 struct ofbundle *in_bundle = in_port->bundle;
abe529af 7075
395e68ce
BP
7076 /* Drop frames for reserved multicast addresses
7077 * only if forward_bpdu option is absent. */
614ec445 7078 if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
479df176 7079 xlate_report(ctx, "packet has reserved destination MAC, dropping");
abe529af
BP
7080 return false;
7081 }
7082
abe529af
BP
7083 if (in_bundle->bond) {
7084 struct mac_entry *mac;
7085
7086 switch (bond_check_admissibility(in_bundle->bond, in_port,
479df176 7087 flow->dl_dst, &ctx->tags)) {
abe529af
BP
7088 case BV_ACCEPT:
7089 break;
7090
7091 case BV_DROP:
479df176 7092 xlate_report(ctx, "bonding refused admissibility, dropping");
abe529af
BP
7093 return false;
7094
7095 case BV_DROP_IF_MOVED:
7096 mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
7097 if (mac && mac->port.p != in_bundle &&
7098 (!is_gratuitous_arp(flow)
7099 || mac_entry_is_grat_arp_locked(mac))) {
479df176
BP
7100 xlate_report(ctx, "SLB bond thinks this packet looped back, "
7101 "dropping");
abe529af
BP
7102 return false;
7103 }
7104 break;
7105 }
7106 }
7107
7108 return true;
7109}
7110
4cd78906 7111static void
abe529af
BP
7112xlate_normal(struct action_xlate_ctx *ctx)
7113{
395e68ce 7114 struct ofport_dpif *in_port;
abe529af 7115 struct ofbundle *in_bundle;
abe529af 7116 struct mac_entry *mac;
395e68ce
BP
7117 uint16_t vlan;
7118 uint16_t vid;
abe529af 7119
75a75043
BP
7120 ctx->has_normal = true;
7121
3581c12c 7122 in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
70c2fd56 7123 ctx->packet != NULL, &in_port);
3581c12c 7124 if (!in_bundle) {
479df176 7125 xlate_report(ctx, "no input bundle, dropping");
395e68ce
BP
7126 return;
7127 }
3581c12c 7128
395e68ce
BP
7129 /* Drop malformed frames. */
7130 if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
7131 !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
7132 if (ctx->packet != NULL) {
7133 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7134 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
7135 "VLAN tag received on port %s",
7136 ctx->ofproto->up.name, in_bundle->name);
7137 }
479df176 7138 xlate_report(ctx, "partial VLAN tag, dropping");
395e68ce
BP
7139 return;
7140 }
7141
7142 /* Drop frames on bundles reserved for mirroring. */
7143 if (in_bundle->mirror_out) {
7144 if (ctx->packet != NULL) {
7145 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7146 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
7147 "%s, which is reserved exclusively for mirroring",
7148 ctx->ofproto->up.name, in_bundle->name);
7149 }
479df176 7150 xlate_report(ctx, "input port is mirror output port, dropping");
395e68ce
BP
7151 return;
7152 }
7153
7154 /* Check VLAN. */
7155 vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
7156 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
479df176 7157 xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
395e68ce
BP
7158 return;
7159 }
7160 vlan = input_vid_to_vlan(in_bundle, vid);
7161
7162 /* Check other admissibility requirements. */
479df176 7163 if (in_port && !is_admissible(ctx, in_port, vlan)) {
395e68ce 7164 return;
abe529af
BP
7165 }
7166
75a75043 7167 /* Learn source MAC. */
3de9590b 7168 if (ctx->may_learn) {
abe529af
BP
7169 update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
7170 }
7171
7172 /* Determine output bundle. */
7173 mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
7174 &ctx->tags);
7175 if (mac) {
c06bba01 7176 if (mac->port.p != in_bundle) {
479df176 7177 xlate_report(ctx, "forwarding to learned port");
c06bba01 7178 output_normal(ctx, mac->port.p, vlan);
479df176
BP
7179 } else {
7180 xlate_report(ctx, "learned port is input port, dropping");
c06bba01 7181 }
abe529af 7182 } else {
c06bba01 7183 struct ofbundle *bundle;
abe529af 7184
479df176 7185 xlate_report(ctx, "no learned MAC for destination, flooding");
c06bba01
JP
7186 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
7187 if (bundle != in_bundle
7188 && ofbundle_includes_vlan(bundle, vlan)
7189 && bundle->floodable
7190 && !bundle->mirror_out) {
7191 output_normal(ctx, bundle, vlan);
7192 }
7193 }
7194 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af 7195 }
abe529af
BP
7196}
7197\f
54a9cbc9
BP
7198/* Optimized flow revalidation.
7199 *
7200 * It's a difficult problem, in general, to tell which facets need to have
7201 * their actions recalculated whenever the OpenFlow flow table changes. We
7202 * don't try to solve that general problem: for most kinds of OpenFlow flow
7203 * table changes, we recalculate the actions for every facet. This is
7204 * relatively expensive, but it's good enough if the OpenFlow flow table
7205 * doesn't change very often.
7206 *
7207 * However, we can expect one particular kind of OpenFlow flow table change to
7208 * happen frequently: changes caused by MAC learning. To avoid wasting a lot
7209 * of CPU on revalidating every facet whenever MAC learning modifies the flow
7210 * table, we add a special case that applies to flow tables in which every rule
7211 * has the same form (that is, the same wildcards), except that the table is
7212 * also allowed to have a single "catch-all" flow that matches all packets. We
7213 * optimize this case by tagging all of the facets that resubmit into the table
7214 * and invalidating the same tag whenever a flow changes in that table. The
7215 * end result is that we revalidate just the facets that need it (and sometimes
7216 * a few more, but not all of the facets or even all of the facets that
7217 * resubmit to the table modified by MAC learning). */
7218
5cb7a798 7219/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
54a9cbc9 7220 * into an OpenFlow table with the given 'basis'. */
822d9414 7221static tag_type
5cb7a798 7222rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
54a9cbc9
BP
7223 uint32_t secret)
7224{
5cb7a798 7225 if (minimask_is_catchall(mask)) {
54a9cbc9
BP
7226 return 0;
7227 } else {
5cb7a798
BP
7228 uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
7229 return tag_create_deterministic(hash);
54a9cbc9
BP
7230 }
7231}
7232
7233/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
7234 * taggability of that table.
7235 *
7236 * This function must be called after *each* change to a flow table. If you
7237 * skip calling it on some changes then the pointer comparisons at the end can
7238 * be invalid if you get unlucky. For example, if a flow removal causes a
7239 * cls_table to be destroyed and then a flow insertion causes a cls_table with
7240 * different wildcards to be created with the same address, then this function
7241 * will incorrectly skip revalidation. */
7242static void
7243table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
7244{
7245 struct table_dpif *table = &ofproto->tables[table_id];
d0918789 7246 const struct oftable *oftable = &ofproto->up.tables[table_id];
54a9cbc9
BP
7247 struct cls_table *catchall, *other;
7248 struct cls_table *t;
7249
7250 catchall = other = NULL;
7251
d0918789 7252 switch (hmap_count(&oftable->cls.tables)) {
54a9cbc9
BP
7253 case 0:
7254 /* We could tag this OpenFlow table but it would make the logic a
7255 * little harder and it's a corner case that doesn't seem worth it
7256 * yet. */
7257 break;
7258
7259 case 1:
7260 case 2:
d0918789 7261 HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
54a9cbc9
BP
7262 if (cls_table_is_catchall(t)) {
7263 catchall = t;
7264 } else if (!other) {
7265 other = t;
7266 } else {
7267 /* Indicate that we can't tag this by setting both tables to
7268 * NULL. (We know that 'catchall' is already NULL.) */
7269 other = NULL;
7270 }
7271 }
7272 break;
7273
7274 default:
7275 /* Can't tag this table. */
7276 break;
7277 }
7278
7279 if (table->catchall_table != catchall || table->other_table != other) {
7280 table->catchall_table = catchall;
7281 table->other_table = other;
2cc3c58e 7282 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7283 }
7284}
7285
7286/* Given 'rule' that has changed in some way (either it is a rule being
7287 * inserted, a rule being deleted, or a rule whose actions are being
7288 * modified), marks facets for revalidation to ensure that packets will be
7289 * forwarded correctly according to the new state of the flow table.
7290 *
7291 * This function must be called after *each* change to a flow table. See
7292 * the comment on table_update_taggable() for more information. */
7293static void
7294rule_invalidate(const struct rule_dpif *rule)
7295{
7296 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7297
7298 table_update_taggable(ofproto, rule->up.table_id);
7299
2cc3c58e 7300 if (!ofproto->backer->need_revalidate) {
54a9cbc9
BP
7301 struct table_dpif *table = &ofproto->tables[rule->up.table_id];
7302
7303 if (table->other_table && rule->tag) {
2cc3c58e 7304 tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
54a9cbc9 7305 } else {
2cc3c58e 7306 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7307 }
7308 }
7309}
7310\f
abe529af 7311static bool
7257b535
BP
7312set_frag_handling(struct ofproto *ofproto_,
7313 enum ofp_config_flags frag_handling)
abe529af
BP
7314{
7315 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7257b535 7316 if (frag_handling != OFPC_FRAG_REASM) {
2cc3c58e 7317 ofproto->backer->need_revalidate = REV_RECONFIGURE;
7257b535
BP
7318 return true;
7319 } else {
7320 return false;
7321 }
abe529af
BP
7322}
7323
90bf1e07 7324static enum ofperr
abe529af
BP
7325packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
7326 const struct flow *flow,
f25d0cf3 7327 const struct ofpact *ofpacts, size_t ofpacts_len)
abe529af
BP
7328{
7329 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
548de4dd
BP
7330 struct odputil_keybuf keybuf;
7331 struct dpif_flow_stats stats;
abe529af 7332
548de4dd 7333 struct ofpbuf key;
112bc5f4 7334
548de4dd
BP
7335 struct action_xlate_ctx ctx;
7336 uint64_t odp_actions_stub[1024 / 8];
7337 struct ofpbuf odp_actions;
80e5eed9 7338
548de4dd 7339 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
7340 odp_flow_key_from_flow(&key, flow,
7341 ofp_port_to_odp_port(ofproto, flow->in_port));
050ac423 7342
548de4dd 7343 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
abe529af 7344
548de4dd
BP
7345 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
7346 packet_get_tcp_flags(packet, flow), packet);
7347 ctx.resubmit_stats = &stats;
2284188b 7348
548de4dd
BP
7349 ofpbuf_use_stub(&odp_actions,
7350 odp_actions_stub, sizeof odp_actions_stub);
7351 xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
acf60855 7352 dpif_execute(ofproto->backer->dpif, key.data, key.size,
548de4dd
BP
7353 odp_actions.data, odp_actions.size, packet);
7354 ofpbuf_uninit(&odp_actions);
2284188b 7355
548de4dd 7356 return 0;
abe529af 7357}
6fca1ffb
BP
7358\f
7359/* NetFlow. */
7360
7361static int
7362set_netflow(struct ofproto *ofproto_,
7363 const struct netflow_options *netflow_options)
7364{
7365 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7366
7367 if (netflow_options) {
7368 if (!ofproto->netflow) {
7369 ofproto->netflow = netflow_create();
7370 }
7371 return netflow_set_options(ofproto->netflow, netflow_options);
7372 } else {
7373 netflow_destroy(ofproto->netflow);
7374 ofproto->netflow = NULL;
7375 return 0;
7376 }
7377}
abe529af
BP
7378
7379static void
7380get_netflow_ids(const struct ofproto *ofproto_,
7381 uint8_t *engine_type, uint8_t *engine_id)
7382{
7383 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7384
acf60855 7385 dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
abe529af 7386}
6fca1ffb
BP
7387
7388static void
7389send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
7390{
7391 if (!facet_is_controller_flow(facet) &&
7392 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
b0f7b9b5 7393 struct subfacet *subfacet;
6fca1ffb
BP
7394 struct ofexpired expired;
7395
b0f7b9b5 7396 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 7397 if (subfacet->path == SF_FAST_PATH) {
b0f7b9b5 7398 struct dpif_flow_stats stats;
6fca1ffb 7399
6a7e895f 7400 subfacet_reinstall(subfacet, &stats);
15baa734 7401 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 7402 }
6fca1ffb
BP
7403 }
7404
7405 expired.flow = facet->flow;
7406 expired.packet_count = facet->packet_count;
7407 expired.byte_count = facet->byte_count;
7408 expired.used = facet->used;
7409 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
7410 }
7411}
7412
7413static void
7414send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
7415{
7416 struct facet *facet;
7417
7418 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
7419 send_active_timeout(ofproto, facet);
7420 }
7421}
abe529af
BP
7422\f
7423static struct ofproto_dpif *
7424ofproto_dpif_lookup(const char *name)
7425{
b44a10b7
BP
7426 struct ofproto_dpif *ofproto;
7427
7428 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
7429 hash_string(name, 0), &all_ofproto_dpifs) {
7430 if (!strcmp(ofproto->up.name, name)) {
7431 return ofproto;
7432 }
7433 }
7434 return NULL;
abe529af
BP
7435}
7436
f0a3aa2e 7437static void
96e466a3 7438ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
0e15264f 7439 const char *argv[], void *aux OVS_UNUSED)
f0a3aa2e 7440{
490df1ef 7441 struct ofproto_dpif *ofproto;
f0a3aa2e 7442
96e466a3
EJ
7443 if (argc > 1) {
7444 ofproto = ofproto_dpif_lookup(argv[1]);
7445 if (!ofproto) {
bde9f75d 7446 unixctl_command_reply_error(conn, "no such bridge");
96e466a3
EJ
7447 return;
7448 }
2cc3c58e 7449 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3
EJ
7450 } else {
7451 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2cc3c58e 7452 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3 7453 }
f0a3aa2e 7454 }
f0a3aa2e 7455
bde9f75d 7456 unixctl_command_reply(conn, "table successfully flushed");
f0a3aa2e
AA
7457}
7458
abe529af 7459static void
0e15264f
BP
7460ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
7461 const char *argv[], void *aux OVS_UNUSED)
abe529af
BP
7462{
7463 struct ds ds = DS_EMPTY_INITIALIZER;
7464 const struct ofproto_dpif *ofproto;
7465 const struct mac_entry *e;
7466
0e15264f 7467 ofproto = ofproto_dpif_lookup(argv[1]);
abe529af 7468 if (!ofproto) {
bde9f75d 7469 unixctl_command_reply_error(conn, "no such bridge");
abe529af
BP
7470 return;
7471 }
7472
7473 ds_put_cstr(&ds, " port VLAN MAC Age\n");
7474 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
7475 struct ofbundle *bundle = e->port.p;
7476 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
7477 ofbundle_get_a_port(bundle)->odp_port,
e764773c
BP
7478 e->vlan, ETH_ADDR_ARGS(e->mac),
7479 mac_entry_age(ofproto->ml, e));
abe529af 7480 }
bde9f75d 7481 unixctl_command_reply(conn, ds_cstr(&ds));
abe529af
BP
7482 ds_destroy(&ds);
7483}
7484
6a6455e5 7485struct trace_ctx {
abe529af
BP
7486 struct action_xlate_ctx ctx;
7487 struct flow flow;
7488 struct ds *result;
7489};
7490
7491static void
29901626
BP
7492trace_format_rule(struct ds *result, uint8_t table_id, int level,
7493 const struct rule_dpif *rule)
abe529af
BP
7494{
7495 ds_put_char_multiple(result, '\t', level);
7496 if (!rule) {
7497 ds_put_cstr(result, "No match\n");
7498 return;
7499 }
7500
29901626
BP
7501 ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
7502 table_id, ntohll(rule->up.flow_cookie));
79feb7df 7503 cls_rule_format(&rule->up.cr, result);
abe529af
BP
7504 ds_put_char(result, '\n');
7505
7506 ds_put_char_multiple(result, '\t', level);
7507 ds_put_cstr(result, "OpenFlow ");
f25d0cf3 7508 ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
abe529af
BP
7509 ds_put_char(result, '\n');
7510}
7511
7512static void
7513trace_format_flow(struct ds *result, int level, const char *title,
6a6455e5 7514 struct trace_ctx *trace)
abe529af
BP
7515{
7516 ds_put_char_multiple(result, '\t', level);
7517 ds_put_format(result, "%s: ", title);
7518 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
7519 ds_put_cstr(result, "unchanged");
7520 } else {
7521 flow_format(result, &trace->ctx.flow);
7522 trace->flow = trace->ctx.flow;
7523 }
7524 ds_put_char(result, '\n');
7525}
7526
eb9e1c26
EJ
7527static void
7528trace_format_regs(struct ds *result, int level, const char *title,
6a6455e5 7529 struct trace_ctx *trace)
eb9e1c26
EJ
7530{
7531 size_t i;
7532
7533 ds_put_char_multiple(result, '\t', level);
7534 ds_put_format(result, "%s:", title);
7535 for (i = 0; i < FLOW_N_REGS; i++) {
7536 ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
7537 }
7538 ds_put_char(result, '\n');
7539}
7540
1ed8d352
EJ
7541static void
7542trace_format_odp(struct ds *result, int level, const char *title,
6a6455e5 7543 struct trace_ctx *trace)
1ed8d352
EJ
7544{
7545 struct ofpbuf *odp_actions = trace->ctx.odp_actions;
7546
7547 ds_put_char_multiple(result, '\t', level);
7548 ds_put_format(result, "%s: ", title);
7549 format_odp_actions(result, odp_actions->data, odp_actions->size);
7550 ds_put_char(result, '\n');
7551}
7552
abe529af
BP
7553static void
7554trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
7555{
6a6455e5 7556 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
abe529af
BP
7557 struct ds *result = trace->result;
7558
7559 ds_put_char(result, '\n');
7560 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
eb9e1c26 7561 trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
1ed8d352 7562 trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
29901626 7563 trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
abe529af
BP
7564}
7565
479df176
BP
7566static void
7567trace_report(struct action_xlate_ctx *ctx, const char *s)
7568{
7569 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
7570 struct ds *result = trace->result;
7571
7572 ds_put_char_multiple(result, '\t', ctx->recurse);
7573 ds_put_cstr(result, s);
7574 ds_put_char(result, '\n');
7575}
7576
abe529af 7577static void
0e15264f 7578ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
abe529af
BP
7579 void *aux OVS_UNUSED)
7580{
0e15264f 7581 const char *dpname = argv[1];
abe529af 7582 struct ofproto_dpif *ofproto;
876b0e1c
BP
7583 struct ofpbuf odp_key;
7584 struct ofpbuf *packet;
e84173dc 7585 ovs_be16 initial_tci;
abe529af
BP
7586 struct ds result;
7587 struct flow flow;
abe529af
BP
7588 char *s;
7589
876b0e1c
BP
7590 packet = NULL;
7591 ofpbuf_init(&odp_key, 0);
abe529af
BP
7592 ds_init(&result);
7593
e84173dc
BP
7594 ofproto = ofproto_dpif_lookup(dpname);
7595 if (!ofproto) {
bde9f75d
EJ
7596 unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
7597 "for help)");
e84173dc
BP
7598 goto exit;
7599 }
0e15264f 7600 if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
8b3b8dd1 7601 /* ofproto/trace dpname flow [-generate] */
0e15264f
BP
7602 const char *flow_s = argv[2];
7603 const char *generate_s = argv[3];
876b0e1c 7604
31a19d69
BP
7605 /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
7606 * flow. We guess which type it is based on whether 'flow_s' contains
7607 * an '(', since a datapath flow always contains '(') but an
7608 * OpenFlow-like flow should not (in fact it's allowed but I believe
7609 * that's not documented anywhere).
7610 *
7611 * An alternative would be to try to parse 'flow_s' both ways, but then
7612 * it would be tricky giving a sensible error message. After all, do
7613 * you just say "syntax error" or do you present both error messages?
7614 * Both choices seem lousy. */
7615 if (strchr(flow_s, '(')) {
7616 int error;
7617
7618 /* Convert string to datapath key. */
7619 ofpbuf_init(&odp_key, 0);
7620 error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
7621 if (error) {
7622 unixctl_command_reply_error(conn, "Bad flow syntax");
7623 goto exit;
7624 }
876b0e1c 7625
e09ee259
EJ
7626 /* XXX: Since we allow the user to specify an ofproto, it's
7627 * possible they will specify a different ofproto than the one the
7628 * port actually belongs too. Ideally we should simply remove the
7629 * ability to specify the ofproto. */
7630 if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
7631 odp_key.size, &flow, NULL, NULL, NULL,
7632 &initial_tci)) {
31a19d69
BP
7633 unixctl_command_reply_error(conn, "Invalid flow");
7634 goto exit;
7635 }
7636 } else {
7637 char *error_s;
7638
7639 error_s = parse_ofp_exact_flow(&flow, argv[2]);
7640 if (error_s) {
7641 unixctl_command_reply_error(conn, error_s);
7642 free(error_s);
7643 goto exit;
7644 }
7645
7646 initial_tci = flow.vlan_tci;
876b0e1c 7647 }
8b3b8dd1
BP
7648
7649 /* Generate a packet, if requested. */
0e15264f 7650 if (generate_s) {
8b3b8dd1
BP
7651 packet = ofpbuf_new(0);
7652 flow_compose(packet, &flow);
7653 }
72e8bf28
AA
7654 } else if (argc == 7) {
7655 /* ofproto/trace dpname priority tun_id in_port mark packet */
0e15264f
BP
7656 const char *priority_s = argv[2];
7657 const char *tun_id_s = argv[3];
7658 const char *in_port_s = argv[4];
72e8bf28
AA
7659 const char *mark_s = argv[5];
7660 const char *packet_s = argv[6];
9b56fe13 7661 uint32_t in_port = atoi(in_port_s);
0e15264f
BP
7662 ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
7663 uint32_t priority = atoi(priority_s);
72e8bf28 7664 uint32_t mark = atoi(mark_s);
e22f1753 7665 const char *msg;
0e15264f 7666
e22f1753
BP
7667 msg = eth_from_hex(packet_s, &packet);
7668 if (msg) {
bde9f75d 7669 unixctl_command_reply_error(conn, msg);
876b0e1c
BP
7670 goto exit;
7671 }
7672
7673 ds_put_cstr(&result, "Packet: ");
c499c75d 7674 s = ofp_packet_to_string(packet->data, packet->size);
876b0e1c
BP
7675 ds_put_cstr(&result, s);
7676 free(s);
7677
72e8bf28 7678 flow_extract(packet, priority, mark, NULL, in_port, &flow);
296e07ac 7679 flow.tunnel.tun_id = tun_id;
e84173dc 7680 initial_tci = flow.vlan_tci;
876b0e1c 7681 } else {
bde9f75d 7682 unixctl_command_reply_error(conn, "Bad command syntax");
abe529af
BP
7683 goto exit;
7684 }
7685
6a6455e5
EJ
7686 ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
7687 unixctl_command_reply(conn, ds_cstr(&result));
7688
7689exit:
7690 ds_destroy(&result);
7691 ofpbuf_delete(packet);
7692 ofpbuf_uninit(&odp_key);
7693}
7694
7695static void
7696ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
7697 const struct ofpbuf *packet, ovs_be16 initial_tci,
7698 struct ds *ds)
7699{
7700 struct rule_dpif *rule;
7701
7702 ds_put_cstr(ds, "Flow: ");
7703 flow_format(ds, flow);
7704 ds_put_char(ds, '\n');
abe529af 7705
c57b2226
BP
7706 rule = rule_dpif_lookup(ofproto, flow);
7707
6a6455e5 7708 trace_format_rule(ds, 0, 0, rule);
c57b2226
BP
7709 if (rule == ofproto->miss_rule) {
7710 ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
7711 } else if (rule == ofproto->no_packet_in_rule) {
7712 ds_put_cstr(ds, "\nNo match, packets dropped because "
7713 "OFPPC_NO_PACKET_IN is set on in_port.\n");
7714 }
7715
abe529af 7716 if (rule) {
050ac423
BP
7717 uint64_t odp_actions_stub[1024 / 8];
7718 struct ofpbuf odp_actions;
7719
6a6455e5 7720 struct trace_ctx trace;
0e553d9c 7721 uint8_t tcp_flags;
abe529af 7722
6a6455e5
EJ
7723 tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
7724 trace.result = ds;
7725 trace.flow = *flow;
050ac423
BP
7726 ofpbuf_use_stub(&odp_actions,
7727 odp_actions_stub, sizeof odp_actions_stub);
6a6455e5 7728 action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
0e553d9c 7729 rule, tcp_flags, packet);
abe529af 7730 trace.ctx.resubmit_hook = trace_resubmit;
479df176 7731 trace.ctx.report_hook = trace_report;
f25d0cf3 7732 xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 7733 &odp_actions);
abe529af 7734
6a6455e5
EJ
7735 ds_put_char(ds, '\n');
7736 trace_format_flow(ds, 0, "Final flow", &trace);
7737 ds_put_cstr(ds, "Datapath actions: ");
050ac423
BP
7738 format_odp_actions(ds, odp_actions.data, odp_actions.size);
7739 ofpbuf_uninit(&odp_actions);
876b0e1c 7740
6a7e895f
BP
7741 if (trace.ctx.slow) {
7742 enum slow_path_reason slow;
7743
7744 ds_put_cstr(ds, "\nThis flow is handled by the userspace "
7745 "slow path because it:");
7746 for (slow = trace.ctx.slow; slow; ) {
7747 enum slow_path_reason bit = rightmost_1bit(slow);
7748
7749 switch (bit) {
7750 case SLOW_CFM:
7751 ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
7752 break;
7753 case SLOW_LACP:
7754 ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
7755 break;
7756 case SLOW_STP:
7757 ds_put_cstr(ds, "\n\t- Consists of STP packets.");
7758 break;
7759 case SLOW_IN_BAND:
7760 ds_put_cstr(ds, "\n\t- Needs in-band special case "
7761 "processing.");
7762 if (!packet) {
7763 ds_put_cstr(ds, "\n\t (The datapath actions are "
7764 "incomplete--for complete actions, "
7765 "please supply a packet.)");
7766 }
7767 break;
7768 case SLOW_CONTROLLER:
7769 ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
7770 "to the OpenFlow controller.");
7771 break;
7772 case SLOW_MATCH:
7773 ds_put_cstr(ds, "\n\t- Needs more specific matching "
7774 "than the datapath supports.");
7775 break;
7776 }
7777
7778 slow &= ~bit;
7779 }
7780
7781 if (slow & ~SLOW_MATCH) {
7782 ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
7783 "the special slow-path processing.");
876b0e1c
BP
7784 }
7785 }
abe529af 7786 }
abe529af
BP
7787}
7788
7ee20df1 7789static void
0e15264f
BP
7790ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7791 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7792{
7793 clogged = true;
bde9f75d 7794 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7795}
7796
7797static void
0e15264f
BP
7798ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7799 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7800{
7801 clogged = false;
bde9f75d 7802 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7803}
7804
6814e51f
BP
7805/* Runs a self-check of flow translations in 'ofproto'. Appends a message to
7806 * 'reply' describing the results. */
7807static void
7808ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
7809{
7810 struct facet *facet;
7811 int errors;
7812
7813 errors = 0;
7814 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
7815 if (!facet_check_consistency(facet)) {
7816 errors++;
7817 }
7818 }
7819 if (errors) {
2cc3c58e 7820 ofproto->backer->need_revalidate = REV_INCONSISTENCY;
6814e51f
BP
7821 }
7822
7823 if (errors) {
7824 ds_put_format(reply, "%s: self-check failed (%d errors)\n",
7825 ofproto->up.name, errors);
7826 } else {
7827 ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
7828 }
7829}
7830
7831static void
7832ofproto_dpif_self_check(struct unixctl_conn *conn,
7833 int argc, const char *argv[], void *aux OVS_UNUSED)
7834{
7835 struct ds reply = DS_EMPTY_INITIALIZER;
7836 struct ofproto_dpif *ofproto;
7837
7838 if (argc > 1) {
7839 ofproto = ofproto_dpif_lookup(argv[1]);
7840 if (!ofproto) {
bde9f75d
EJ
7841 unixctl_command_reply_error(conn, "Unknown ofproto (use "
7842 "ofproto/list for help)");
6814e51f
BP
7843 return;
7844 }
7845 ofproto_dpif_self_check__(ofproto, &reply);
7846 } else {
7847 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
7848 ofproto_dpif_self_check__(ofproto, &reply);
7849 }
7850 }
7851
bde9f75d 7852 unixctl_command_reply(conn, ds_cstr(&reply));
6814e51f
BP
7853 ds_destroy(&reply);
7854}
7855
27022416
JP
7856/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
7857 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
7858 * to destroy 'ofproto_shash' and free the returned value. */
7859static const struct shash_node **
7860get_ofprotos(struct shash *ofproto_shash)
7861{
7862 const struct ofproto_dpif *ofproto;
7863
7864 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
7865 char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
7866 shash_add_nocopy(ofproto_shash, name, ofproto);
7867 }
7868
7869 return shash_sort(ofproto_shash);
7870}
7871
7872static void
7873ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
7874 const char *argv[] OVS_UNUSED,
7875 void *aux OVS_UNUSED)
7876{
7877 struct ds ds = DS_EMPTY_INITIALIZER;
7878 struct shash ofproto_shash;
7879 const struct shash_node **sorted_ofprotos;
7880 int i;
7881
7882 shash_init(&ofproto_shash);
7883 sorted_ofprotos = get_ofprotos(&ofproto_shash);
7884 for (i = 0; i < shash_count(&ofproto_shash); i++) {
7885 const struct shash_node *node = sorted_ofprotos[i];
7886 ds_put_format(&ds, "%s\n", node->name);
7887 }
7888
7889 shash_destroy(&ofproto_shash);
7890 free(sorted_ofprotos);
7891
7892 unixctl_command_reply(conn, ds_cstr(&ds));
7893 ds_destroy(&ds);
7894}
7895
7896static void
7897show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
7898{
7899 struct dpif_dp_stats s;
7900 const struct shash_node **ports;
7901 int i;
7902
acf60855 7903 dpif_get_dp_stats(ofproto->backer->dpif, &s);
27022416 7904
acf60855
JP
7905 ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
7906 dpif_name(ofproto->backer->dpif));
7907 /* xxx It would be better to show bridge-specific stats instead
7908 * xxx of dp ones. */
27022416
JP
7909 ds_put_format(ds,
7910 "\tlookups: hit:%"PRIu64" missed:%"PRIu64" lost:%"PRIu64"\n",
7911 s.n_hit, s.n_missed, s.n_lost);
acf60855
JP
7912 ds_put_format(ds, "\tflows: %zu\n",
7913 hmap_count(&ofproto->subfacets));
27022416
JP
7914
7915 ports = shash_sort(&ofproto->up.port_by_name);
7916 for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
7917 const struct shash_node *node = ports[i];
7918 struct ofport *ofport = node->data;
7919 const char *name = netdev_get_name(ofport->netdev);
7920 const char *type = netdev_get_type(ofport->netdev);
0a740f48
EJ
7921 uint32_t odp_port;
7922
7923 ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
7924
7925 odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
7926 if (odp_port != OVSP_NONE) {
7927 ds_put_format(ds, "%"PRIu32":", odp_port);
7928 } else {
7929 ds_put_cstr(ds, "none:");
7930 }
27022416 7931
27022416
JP
7932 if (strcmp(type, "system")) {
7933 struct netdev *netdev;
7934 int error;
7935
7936 ds_put_format(ds, " (%s", type);
7937
7938 error = netdev_open(name, type, &netdev);
7939 if (!error) {
7940 struct smap config;
7941
7942 smap_init(&config);
7943 error = netdev_get_config(netdev, &config);
7944 if (!error) {
7945 const struct smap_node **nodes;
7946 size_t i;
7947
7948 nodes = smap_sort(&config);
7949 for (i = 0; i < smap_count(&config); i++) {
7950 const struct smap_node *node = nodes[i];
7951 ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
7952 node->key, node->value);
7953 }
7954 free(nodes);
7955 }
7956 smap_destroy(&config);
7957
7958 netdev_close(netdev);
7959 }
7960 ds_put_char(ds, ')');
7961 }
7962 ds_put_char(ds, '\n');
7963 }
7964 free(ports);
7965}
7966
7967static void
7968ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
7969 const char *argv[], void *aux OVS_UNUSED)
7970{
7971 struct ds ds = DS_EMPTY_INITIALIZER;
7972 const struct ofproto_dpif *ofproto;
7973
7974 if (argc > 1) {
7975 int i;
7976 for (i = 1; i < argc; i++) {
7977 ofproto = ofproto_dpif_lookup(argv[i]);
7978 if (!ofproto) {
7979 ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
7980 "for help)", argv[i]);
7981 unixctl_command_reply_error(conn, ds_cstr(&ds));
7982 return;
7983 }
7984 show_dp_format(ofproto, &ds);
7985 }
7986 } else {
7987 struct shash ofproto_shash;
7988 const struct shash_node **sorted_ofprotos;
7989 int i;
7990
7991 shash_init(&ofproto_shash);
7992 sorted_ofprotos = get_ofprotos(&ofproto_shash);
7993 for (i = 0; i < shash_count(&ofproto_shash); i++) {
7994 const struct shash_node *node = sorted_ofprotos[i];
7995 show_dp_format(node->data, &ds);
7996 }
7997
7998 shash_destroy(&ofproto_shash);
7999 free(sorted_ofprotos);
8000 }
8001
8002 unixctl_command_reply(conn, ds_cstr(&ds));
8003 ds_destroy(&ds);
8004}
8005
8006static void
8007ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
8008 int argc OVS_UNUSED, const char *argv[],
8009 void *aux OVS_UNUSED)
8010{
8011 struct ds ds = DS_EMPTY_INITIALIZER;
8012 const struct ofproto_dpif *ofproto;
8013 struct subfacet *subfacet;
8014
8015 ofproto = ofproto_dpif_lookup(argv[1]);
8016 if (!ofproto) {
8017 unixctl_command_reply_error(conn, "no such bridge");
8018 return;
8019 }
8020
af37354d
EJ
8021 update_stats(ofproto->backer);
8022
27022416
JP
8023 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
8024 struct odputil_keybuf keybuf;
8025 struct ofpbuf key;
8026
8027 subfacet_get_key(subfacet, &keybuf, &key);
8028 odp_flow_key_format(key.data, key.size, &ds);
8029
8030 ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
8031 subfacet->dp_packet_count, subfacet->dp_byte_count);
8032 if (subfacet->used) {
8033 ds_put_format(&ds, "%.3fs",
8034 (time_msec() - subfacet->used) / 1000.0);
8035 } else {
8036 ds_put_format(&ds, "never");
8037 }
8038 if (subfacet->facet->tcp_flags) {
8039 ds_put_cstr(&ds, ", flags:");
8040 packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
8041 }
8042
8043 ds_put_cstr(&ds, ", actions:");
8044 format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
8045 ds_put_char(&ds, '\n');
8046 }
8047
8048 unixctl_command_reply(conn, ds_cstr(&ds));
8049 ds_destroy(&ds);
8050}
8051
8052static void
8053ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
8054 int argc OVS_UNUSED, const char *argv[],
8055 void *aux OVS_UNUSED)
8056{
8057 struct ds ds = DS_EMPTY_INITIALIZER;
8058 struct ofproto_dpif *ofproto;
8059
8060 ofproto = ofproto_dpif_lookup(argv[1]);
8061 if (!ofproto) {
8062 unixctl_command_reply_error(conn, "no such bridge");
8063 return;
8064 }
8065
8066 flush(&ofproto->up);
8067
8068 unixctl_command_reply(conn, ds_cstr(&ds));
8069 ds_destroy(&ds);
8070}
8071
abe529af
BP
8072static void
8073ofproto_dpif_unixctl_init(void)
8074{
8075 static bool registered;
8076 if (registered) {
8077 return;
8078 }
8079 registered = true;
8080
0e15264f
BP
8081 unixctl_command_register(
8082 "ofproto/trace",
72e8bf28
AA
8083 "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
8084 2, 6, ofproto_unixctl_trace, NULL);
96e466a3 8085 unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
0e15264f
BP
8086 ofproto_unixctl_fdb_flush, NULL);
8087 unixctl_command_register("fdb/show", "bridge", 1, 1,
8088 ofproto_unixctl_fdb_show, NULL);
8089 unixctl_command_register("ofproto/clog", "", 0, 0,
8090 ofproto_dpif_clog, NULL);
8091 unixctl_command_register("ofproto/unclog", "", 0, 0,
8092 ofproto_dpif_unclog, NULL);
6814e51f
BP
8093 unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
8094 ofproto_dpif_self_check, NULL);
27022416
JP
8095 unixctl_command_register("dpif/dump-dps", "", 0, 0,
8096 ofproto_unixctl_dpif_dump_dps, NULL);
8097 unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
8098 ofproto_unixctl_dpif_show, NULL);
8099 unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
8100 ofproto_unixctl_dpif_dump_flows, NULL);
8101 unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
8102 ofproto_unixctl_dpif_del_flows, NULL);
abe529af
BP
8103}
8104\f
52a90c29
BP
8105/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
8106 *
8107 * This is deprecated. It is only for compatibility with broken device drivers
8108 * in old versions of Linux that do not properly support VLANs when VLAN
8109 * devices are not used. When broken device drivers are no longer in
8110 * widespread use, we will delete these interfaces. */
8111
8112static int
8113set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
8114{
8115 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
8116 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
8117
8118 if (realdev_ofp_port == ofport->realdev_ofp_port
8119 && vid == ofport->vlandev_vid) {
8120 return 0;
8121 }
8122
2cc3c58e 8123 ofproto->backer->need_revalidate = REV_RECONFIGURE;
52a90c29
BP
8124
8125 if (ofport->realdev_ofp_port) {
8126 vsp_remove(ofport);
8127 }
8128 if (realdev_ofp_port && ofport->bundle) {
8129 /* vlandevs are enslaved to their realdevs, so they are not allowed to
8130 * themselves be part of a bundle. */
8131 bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
8132 }
8133
8134 ofport->realdev_ofp_port = realdev_ofp_port;
8135 ofport->vlandev_vid = vid;
8136
8137 if (realdev_ofp_port) {
8138 vsp_add(ofport, realdev_ofp_port, vid);
8139 }
8140
8141 return 0;
8142}
8143
8144static uint32_t
8145hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
8146{
8147 return hash_2words(realdev_ofp_port, vid);
8148}
8149
40e05935
BP
8150/* Returns the ODP port number of the Linux VLAN device that corresponds to
8151 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
8152 * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
8153 * it would return the port number of eth0.9.
8154 *
8155 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
8156 * function just returns its 'realdev_odp_port' argument. */
52a90c29
BP
8157static uint32_t
8158vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
8159 uint32_t realdev_odp_port, ovs_be16 vlan_tci)
8160{
8161 if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
e1b1d06a 8162 uint16_t realdev_ofp_port;
52a90c29
BP
8163 int vid = vlan_tci_to_vid(vlan_tci);
8164 const struct vlan_splinter *vsp;
8165
e1b1d06a 8166 realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
52a90c29
BP
8167 HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
8168 hash_realdev_vid(realdev_ofp_port, vid),
8169 &ofproto->realdev_vid_map) {
8170 if (vsp->realdev_ofp_port == realdev_ofp_port
8171 && vsp->vid == vid) {
e1b1d06a 8172 return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
52a90c29
BP
8173 }
8174 }
8175 }
8176 return realdev_odp_port;
8177}
8178
8179static struct vlan_splinter *
8180vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
8181{
8182 struct vlan_splinter *vsp;
8183
8184 HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
8185 &ofproto->vlandev_map) {
8186 if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
8187 return vsp;
8188 }
8189 }
8190
8191 return NULL;
8192}
8193
40e05935
BP
8194/* Returns the OpenFlow port number of the "real" device underlying the Linux
8195 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
8196 * VLAN VID of the Linux VLAN device in '*vid'. For example, given
8197 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
8198 * eth0 and store 9 in '*vid'.
8199 *
8200 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
8201 * VLAN device. Unless VLAN splinters are enabled, this is what this function
8202 * always does.*/
52a90c29
BP
8203static uint16_t
8204vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
40e05935 8205 uint16_t vlandev_ofp_port, int *vid)
52a90c29
BP
8206{
8207 if (!hmap_is_empty(&ofproto->vlandev_map)) {
8208 const struct vlan_splinter *vsp;
8209
8210 vsp = vlandev_find(ofproto, vlandev_ofp_port);
8211 if (vsp) {
8212 if (vid) {
8213 *vid = vsp->vid;
8214 }
8215 return vsp->realdev_ofp_port;
8216 }
8217 }
8218 return 0;
8219}
8220
b98d8985
BP
8221/* Given 'flow', a flow representing a packet received on 'ofproto', checks
8222 * whether 'flow->in_port' represents a Linux VLAN device. If so, changes
8223 * 'flow->in_port' to the "real" device backing the VLAN device, sets
8224 * 'flow->vlan_tci' to the VLAN VID, and returns true. Otherwise (which is
8225 * always the case unless VLAN splinters are enabled), returns false without
8226 * making any changes. */
8227static bool
8228vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
8229{
8230 uint16_t realdev;
8231 int vid;
8232
8233 realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
8234 if (!realdev) {
8235 return false;
8236 }
8237
8238 /* Cause the flow to be processed as if it came in on the real device with
8239 * the VLAN device's VLAN ID. */
8240 flow->in_port = realdev;
8241 flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
8242 return true;
8243}
8244
52a90c29
BP
8245static void
8246vsp_remove(struct ofport_dpif *port)
8247{
8248 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
8249 struct vlan_splinter *vsp;
8250
8251 vsp = vlandev_find(ofproto, port->up.ofp_port);
8252 if (vsp) {
8253 hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
8254 hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
8255 free(vsp);
8256
8257 port->realdev_ofp_port = 0;
8258 } else {
8259 VLOG_ERR("missing vlan device record");
8260 }
8261}
8262
8263static void
8264vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
8265{
8266 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
8267
8268 if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
8269 && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
8270 == realdev_ofp_port)) {
8271 struct vlan_splinter *vsp;
8272
8273 vsp = xmalloc(sizeof *vsp);
8274 hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
8275 hash_int(port->up.ofp_port, 0));
8276 hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
8277 hash_realdev_vid(realdev_ofp_port, vid));
8278 vsp->realdev_ofp_port = realdev_ofp_port;
8279 vsp->vlandev_ofp_port = port->up.ofp_port;
8280 vsp->vid = vid;
8281
8282 port->realdev_ofp_port = realdev_ofp_port;
8283 } else {
8284 VLOG_ERR("duplicate vlan device record");
8285 }
8286}
e1b1d06a
JP
8287
8288static uint32_t
8289ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
8290{
8291 const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
8292 return ofport ? ofport->odp_port : OVSP_NONE;
8293}
8294
acf60855
JP
8295static struct ofport_dpif *
8296odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
e1b1d06a
JP
8297{
8298 struct ofport_dpif *port;
8299
8300 HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
8301 hash_int(odp_port, 0),
acf60855 8302 &backer->odp_to_ofport_map) {
e1b1d06a 8303 if (port->odp_port == odp_port) {
acf60855 8304 return port;
e1b1d06a
JP
8305 }
8306 }
8307
acf60855
JP
8308 return NULL;
8309}
8310
8311static uint16_t
8312odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
8313{
8314 struct ofport_dpif *port;
8315
8316 port = odp_port_to_ofport(ofproto->backer, odp_port);
6472ba11 8317 if (port && &ofproto->up == port->up.ofproto) {
acf60855
JP
8318 return port->up.ofp_port;
8319 } else {
8320 return OFPP_NONE;
8321 }
e1b1d06a
JP
8322}
8323
abe529af 8324const struct ofproto_class ofproto_dpif_class = {
b0408fca 8325 init,
abe529af
BP
8326 enumerate_types,
8327 enumerate_names,
8328 del,
0aeaabc8 8329 port_open_type,
acf60855
JP
8330 type_run,
8331 type_run_fast,
8332 type_wait,
abe529af
BP
8333 alloc,
8334 construct,
8335 destruct,
8336 dealloc,
8337 run,
5fcc0d00 8338 run_fast,
abe529af 8339 wait,
0d085684 8340 get_memory_usage,
abe529af 8341 flush,
6c1491fb
BP
8342 get_features,
8343 get_tables,
abe529af
BP
8344 port_alloc,
8345 port_construct,
8346 port_destruct,
8347 port_dealloc,
8348 port_modified,
8349 port_reconfigured,
8350 port_query_by_name,
8351 port_add,
8352 port_del,
6527c598 8353 port_get_stats,
abe529af
BP
8354 port_dump_start,
8355 port_dump_next,
8356 port_dump_done,
8357 port_poll,
8358 port_poll_wait,
8359 port_is_lacp_current,
0ab6decf 8360 NULL, /* rule_choose_table */
abe529af
BP
8361 rule_alloc,
8362 rule_construct,
8363 rule_destruct,
8364 rule_dealloc,
abe529af
BP
8365 rule_get_stats,
8366 rule_execute,
8367 rule_modify_actions,
7257b535 8368 set_frag_handling,
abe529af
BP
8369 packet_out,
8370 set_netflow,
8371 get_netflow_ids,
8372 set_sflow,
8373 set_cfm,
a5610457 8374 get_cfm_fault,
1c0333b6 8375 get_cfm_opup,
1de11730 8376 get_cfm_remote_mpids,
3967a833 8377 get_cfm_health,
21f7563c
JP
8378 set_stp,
8379 get_stp_status,
8380 set_stp_port,
8381 get_stp_port_status,
8b36f51e 8382 set_queues,
abe529af
BP
8383 bundle_set,
8384 bundle_remove,
8385 mirror_set,
9d24de3b 8386 mirror_get_stats,
abe529af
BP
8387 set_flood_vlans,
8388 is_mirror_output_bundle,
8402c74b 8389 forward_bpdu_changed,
c4069512 8390 set_mac_table_config,
52a90c29 8391 set_realdev,
abe529af 8392};