/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "simap.h"
#include "smap.h"
#include "timer.h"
#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);

struct ofport_dpif;
struct ofproto_dpif;
struct flow_miss;

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
};

static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}
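
/* Illustrative note (a sketch, not part of the original file):
 * rule_dpif_cast() relies on CONTAINER_OF() to recover the enclosing
 * rule_dpif from a pointer to its embedded 'up' member, so the cast is an
 * exact inverse of taking the member's address:
 *
 *     struct rule_dpif rd;
 *     ovs_assert(rule_dpif_cast(&rd.up) == &rd);
 */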

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            uint8_t table);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static void flow_push_stats(struct rule_dpif *, const struct flow *,
                            const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);

/* A controller may use OFPP_NONE as the ingress port to indicate that a
 * packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does
 * not have any 'port' structs, so care must be taken when dealing with
 * it. */
static struct ofbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */
    bool may_learn;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing
     * a resubmit or OFPP_TABLE action.  In addition, it disables logging of
     * traces when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct action_xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    const struct dpif_flow_stats *resubmit_stats;

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
                          const struct ofpact *ofpacts, size_t ofpacts_len,
                          struct ofpbuf *odp_actions);
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
                                           const struct ofpact *ofpacts,
                                           size_t ofpacts_len);
static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *);

static void compose_slow_path(const struct ofproto_dpif *,
                              const struct flow *, enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct action_xlate_ctx *ctx, const char *s);

/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be
 *     the case just after the subfacet is created, just before the subfacet
 *     is destroyed, or if the datapath returns an error when we try to
 *     install a subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet
 *     through ofproto_dpif is installed in the datapath.
 */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};

static const char *subfacet_path_to_string(enum subfacet_path);

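/* Illustrative note (a sketch, not part of the original file): the mapping
 * from a slow-path reason to an installation path is what
 * subfacet_want_path(), declared later in this file, computes.
 * Conceptually:
 *
 *     enum subfacet_path
 *     subfacet_want_path(enum slow_path_reason slow)
 *     {
 *         return slow ? SF_SLOW_PATH : SF_FAST_PATH;
 *     }
 *
 * i.e. any nonzero slow-path reason forces the send-to-userspace action. */
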
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'facets' list. */
    struct facet *facet;        /* Owning facet. */

    /* Key.
     *
     * To save memory in the common case, 'key' is NULL if 'key_fitness' is
     * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
     * regenerate the ODP flow key from ->facet->flow. */
    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet,
     * but may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    enum slow_path_reason slow; /* 0 if fast path may be used. */
    enum subfacet_path path;    /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */

    /* Datapath port the packet arrived on.  This is needed to remove
     * flows for ports that are no longer part of the bridge.  Since the
     * flow definition only has the OpenFlow port number and the port is
     * no longer part of the bridge, we can't determine the datapath port
     * number needed to delete the flow from the datapath. */
    uint32_t odp_in_port;
};
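
/* Illustrative note (a sketch, not part of the original file): when 'key' is
 * NULL because 'key_fitness' is ODP_FIT_PERFECT, the ODP flow key can be
 * rebuilt on demand from the owning facet, roughly:
 *
 *     struct odputil_keybuf keybuf;
 *     struct ofpbuf key;
 *
 *     ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
 *     odp_flow_key_from_flow(&key, &subfacet->facet->flow);
 *
 * which is the memory-saving trade-off the "Key" comment above describes.
 * (The exact arguments to odp_flow_key_from_flow() are version-dependent;
 * see subfacet_get_key(), declared below, for the real mechanism.) */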

#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash,
                                      const struct flow *flow);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
                                   struct subfacet **, int n);
static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
                             struct ofpbuf *key);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet,
                                  struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *, enum slow_path_reason);
static void subfacet_uninstall(struct subfacet *);

static enum subfacet_path subfacet_want_path(enum slow_path_reason);

/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
    struct list list_node;      /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;     /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count; /* Number of packets from last stats push. */
    uint64_t prev_byte_count;   /* Number of bytes from last stats push. */
    long long int prev_used;    /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;   /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;          /* TCP flags seen for this 'rule'. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;              /* Tags that would require revalidation. */
    mirror_mask_t mirrors;      /* Bitmap of dependent mirrors. */

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.) */
    struct subfacet one_subfacet;
};
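
/* Illustrative note (not part of the original file): the ownership chain
 * described above is strictly hierarchical:
 *
 *     struct rule_dpif   1 ---> N   struct facet     (rule->facets list)
 *     struct facet       1 ---> N   struct subfacet  (facet->subfacets list)
 *
 * so statistics fold upward, from subfacet to facet to rule (see
 * facet_flush_stats(), declared below). */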

static struct facet *facet_create(struct rule_dpif *,
                                  const struct flow *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static void facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);

static bool facet_is_controller_flow(struct facet *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs
     * when VLAN devices are not used.  When broken device drivers are no
     * longer in widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
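
/* Illustrative note (a sketch, not part of the original file): entries in
 * the 'priorities' map above are keyed on 'priority', so a lookup follows
 * the usual OVS hmap pattern, e.g.:
 *
 *     struct priority_to_dscp *pdscp;
 *     HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash_int(priority, 0),
 *                              &ofport->priorities) {
 *         if (pdscp->priority == priority) {
 *             return pdscp;
 *         }
 *     }
 *
 * (hash_int() keying is an assumption here; the hash actually used elsewhere
 * in this file may differ.) */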

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    ovs_assert(!ofport
               || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);

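/* Illustrative note (not part of the original file), describing the pattern
 * this file uses for the enum above: a subsystem records a reason, e.g.
 *
 *     ofproto->backer->need_revalidate = REV_FLOW_TABLE;
 *
 * and the next call to type_run() bumps the matching coverage counter,
 * revalidates every facet that uses that backer, and resets the flag to 0. */
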
/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows which have no associated ofproto; if they did, we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};

/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
};

/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;
    struct governor *governor;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed
                                 * in userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;          /* Set of standard port names. */
    struct sset ghost_ports;    /* Ports with no datapath port. */
    struct sset port_poll_set;  /* Queued names for port_poll() reply. */
    int port_poll_errno;        /* Last errno for port_poll() reply. */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, ovs_be16 initial_tci,
                          struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);

/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial port-to-bridge mappings. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

static int
type_run(const char *type)
{
    struct dpif_backer *backer;
    char *devname;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    if (backer->need_revalidate
        || !tag_set_is_empty(&backer->revalidate_set)) {
        struct tag_set revalidate_set = backer->revalidate_set;
        bool need_revalidate = backer->need_revalidate;
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                const char *dp_port;

                if (!iter->tnl_port) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        uint32_t odp_port = UINT32_MAX;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port, odp_port);
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? node->data : OVSP_NONE;
                if (tnl_port_reconfigure(&iter->up, iter->odp_port,
                                         &iter->tnl_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, node->data);
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        if (backer->need_revalidate) {
            /* Clear the drop_keys in case we should now be accepting some
             * formerly dropped flows. */
            drop_key_clear(backer);
        }

        /* Clear the revalidation flags. */
        tag_set_init(&backer->revalidate_set);
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet;

            if (ofproto->backer != backer) {
                continue;
            }

            HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
                if (need_revalidate
                    || tag_set_intersects(&revalidate_set, facet->tags)) {
                    facet_revalidate(facet);
                }
            }
        }
    }

    if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    /* Check for port changes in the dpif. */
    while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
        struct ofproto_dpif *ofproto;
        struct dpif_port port;

        /* Don't report on the datapath's device. */
        if (!strcmp(devname, dpif_base_name(backer->dpif))) {
            goto next;
        }

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
                goto next;
            }
        }

        ofproto = lookup_ofproto_dpif_by_port_name(devname);
        if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
            /* The port was removed.  If we know the datapath,
             * report it through poll_set().  If we don't, it may be
             * notifying us of a removal we initiated, so ignore it.
             * If there's a pending ENOBUFS, let it stand, since
             * everything will be reevaluated. */
            if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
                sset_add(&ofproto->port_poll_set, devname);
                ofproto->port_poll_errno = 0;
            }
        } else if (!ofproto) {
            /* The port was added, but we don't know with which
             * ofproto we should associate it.  Delete it. */
            dpif_port_del(backer->dpif, port.port_no);
        }
        dpif_port_destroy(&port);

    next:
        free(devname);
    }

    if (error != EAGAIN) {
        struct ofproto_dpif *ofproto;

        /* There was some sort of error, so propagate it to all
         * ofprotos that use this backer. */
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (ofproto->backer == backer) {
                sset_clear(&ofproto->port_poll_set);
                ofproto->port_poll_errno = error;
            }
        }
    }

    return 0;
}

static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;
    unsigned int work;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire them off all at once.  We do multiple batches because
     * in some cases handling a packet can cause another packet to be queued
     * almost immediately as part of the return flow.  Both optimizations can
     * make major improvements on some benchmarks and presumably for real
     * traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(backer, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}

static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    timer_wait(&backer->next_expiration);
}

/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    struct shash_node *node;

    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    simap_destroy(&backer->tnl_backers);
    hmap_destroy(&backer->odp_to_ofport_map);
    node = shash_find(&all_dpif_backers, backer->type);
    free(backer->type);
    shash_delete(&all_dpif_backers, node);
    dpif_close(backer->dpif);

    free(backer);
}

/* Datapath port slated for removal from datapath. */
struct odp_garbage {
    struct list list_node;
    uint32_t odp_port;
};

static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct list garbage_list;
    struct odp_garbage *garbage, *next;
    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 strerror(error));
        free(backer);
        return error;
    }

    backer->type = xstrdup(type);
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    hmap_init(&backer->drop_keys);
    timer_set_duration(&backer->next_expiration, 1000);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    tag_set_init(&backer->revalidate_set);
    *backerp = backer;

    dpif_flow_flush(backer->dpif);

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        list_remove(&garbage->list_node);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    error = dpif_recv_set(backer->dpif, true);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, strerror(error));
        close_dpif_backer(backer);
        return error;
    }

    return error;
}

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int max_ports;
    int error;
    int i;

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    max_ports = dpif_get_max_ports(ofproto->backer->dpif);
    ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));

    ofproto->n_matches = 0;

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);
    ofproto->governor = NULL;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    return error;
}

static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    match_init_catchall(&fm.match);
    fm.priority = 0;
    match_set_reg(&fm.match, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.buffer_id = 0;
    fm.out_port = 0;
    fm.flags = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
    ovs_assert(*rulep != NULL);

    return 0;
}

static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    return error;
}

static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);
    governor_destroy(ofproto->governor);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    close_dpif_backer(ofproto->backer);
}

static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run_fast(ofport);
    }

    return 0;
}

static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);

    /* Check the consistency of a random facet, to aid debugging. */
    if (!hmap_is_empty(&ofproto->facets)
        && !ofproto->backer->need_revalidate) {
        struct facet *facet;

        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->backer->revalidate_set,
                                facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->backer->need_revalidate = REV_INCONSISTENCY;
            }
        }
    }

    if (ofproto->governor) {
        size_t n_subfacets;

        governor_run(ofproto->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
            && governor_is_idle(ofproto->governor)) {
            governor_destroy(ofproto->governor);
            ofproto->governor = NULL;
        }
    }

    return 0;
}

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->backer->dpif);
    dpif_recv_wait(ofproto->backer->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->backer->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    }
    if (ofproto->governor) {
        governor_wait(ofproto->governor);
    }
}

static void
get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    simap_increase(usage, "facets", hmap_count(&ofproto->facets));
    simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
}

abe529af
BP
1533static void
1534flush(struct ofproto *ofproto_)
1535{
1536 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855
JP
1537 struct subfacet *subfacet, *next_subfacet;
1538 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
1539 int n_batch;
b0f7b9b5 1540
acf60855
JP
1541 n_batch = 0;
1542 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
1543 &ofproto->subfacets) {
1544 if (subfacet->path != SF_NOT_INSTALLED) {
1545 batch[n_batch++] = subfacet;
1546 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
1547 subfacet_destroy_batch(ofproto, batch, n_batch);
1548 n_batch = 0;
1549 }
1550 } else {
1551 subfacet_destroy(subfacet);
b0f7b9b5 1552 }
abe529af 1553 }
1554
1555 if (n_batch > 0) {
1556 subfacet_destroy_batch(ofproto, batch, n_batch);
1557 }
1558}
1559
1560static void
1561get_features(struct ofproto *ofproto_ OVS_UNUSED,
9e1fd49b 1562 bool *arp_match_ip, enum ofputil_action_bitmap *actions)
1563{
1564 *arp_match_ip = true;
1565 *actions = (OFPUTIL_A_OUTPUT |
1566 OFPUTIL_A_SET_VLAN_VID |
1567 OFPUTIL_A_SET_VLAN_PCP |
1568 OFPUTIL_A_STRIP_VLAN |
1569 OFPUTIL_A_SET_DL_SRC |
1570 OFPUTIL_A_SET_DL_DST |
1571 OFPUTIL_A_SET_NW_SRC |
1572 OFPUTIL_A_SET_NW_DST |
1573 OFPUTIL_A_SET_NW_TOS |
1574 OFPUTIL_A_SET_TP_SRC |
1575 OFPUTIL_A_SET_TP_DST |
1576 OFPUTIL_A_ENQUEUE);
1577}
1578
1579static void
307975da 1580get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
1581{
1582 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
a8d9304d 1583 struct dpif_dp_stats s;
1584
1585 strcpy(ots->name, "classifier");
1586
1587 dpif_get_dp_stats(ofproto->backer->dpif, &s);
1588
1589 ots->lookup_count = htonll(s.n_hit + s.n_missed);
1590 ots->matched_count = htonll(s.n_hit + ofproto->n_matches);
1591}
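/* Editor's sketch (not upstream code): the arithmetic behind the table
 * stats filled in just above.  The datapath reports how many packets hit
 * or missed its flow table; every miss is upcalled, and 'n_matches' counts
 * the upcalled packets that nevertheless matched an OpenFlow rule. */
#include <stdint.h>

static void
classifier_stats(uint64_t n_hit, uint64_t n_missed, uint64_t n_matches,
                 uint64_t *lookup_count, uint64_t *matched_count)
{
    *lookup_count = n_hit + n_missed;    /* Every packet was looked up. */
    *matched_count = n_hit + n_matches;  /* Fast-path plus slow-path hits. */
}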
1592
1593static struct ofport *
1594port_alloc(void)
1595{
1596 struct ofport_dpif *port = xmalloc(sizeof *port);
1597 return &port->up;
1598}
1599
1600static void
1601port_dealloc(struct ofport *port_)
1602{
1603 struct ofport_dpif *port = ofport_dpif_cast(port_);
1604 free(port);
1605}
1606
1607static int
1608port_construct(struct ofport *port_)
1609{
1610 struct ofport_dpif *port = ofport_dpif_cast(port_);
1611 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
b9ad7294 1612 const struct netdev *netdev = port->up.netdev;
1613 struct dpif_port dpif_port;
1614 int error;
abe529af 1615
2cc3c58e 1616 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1617 port->bundle = NULL;
1618 port->cfm = NULL;
1619 port->tag = tag_create_random();
d5ffa7f2 1620 port->may_enable = true;
1621 port->stp_port = NULL;
1622 port->stp_state = STP_DISABLED;
b9ad7294 1623 port->tnl_port = NULL;
8b36f51e 1624 hmap_init(&port->priorities);
1625 port->realdev_ofp_port = 0;
1626 port->vlandev_vid = 0;
b9ad7294 1627 port->carrier_seq = netdev_get_carrier_resets(netdev);
abe529af 1628
b9ad7294 1629 if (netdev_vport_is_patch(netdev)) {
1630 /* XXX By bailing out here, we don't do required sFlow work. */
1631 port->odp_port = OVSP_NONE;
1632 return 0;
1633 }
1634
acf60855 1635 error = dpif_port_query_by_name(ofproto->backer->dpif,
b9ad7294 1636 netdev_vport_get_dpif_port(netdev),
1637 &dpif_port);
1638 if (error) {
1639 return error;
1640 }
1641
1642 port->odp_port = dpif_port.port_no;
1643
1644 if (netdev_get_tunnel_config(netdev)) {
1645 port->tnl_port = tnl_port_add(&port->up, port->odp_port);
1646 } else {
1647 /* Sanity-check that a mapping doesn't already exist. This
1648 * shouldn't happen for non-tunnel ports. */
1649 if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
1650 VLOG_ERR("port %s already has an OpenFlow port number",
1651 dpif_port.name);
da78d43d 1652 dpif_port_destroy(&dpif_port);
1653 return EBUSY;
1654 }
e1b1d06a 1655
1656 hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
1657 hash_int(port->odp_port, 0));
1658 }
da78d43d 1659 dpif_port_destroy(&dpif_port);
e1b1d06a 1660
abe529af 1661 if (ofproto->sflow) {
e1b1d06a 1662 dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
1663 }
1664
1665 return 0;
1666}
1667
1668static void
1669port_destruct(struct ofport *port_)
1670{
1671 struct ofport_dpif *port = ofport_dpif_cast(port_);
1672 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
b9ad7294 1673 const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
02f8d646 1674 const char *devname = netdev_get_name(port->up.netdev);
abe529af 1675
a614d823 1676 if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
1677 /* The underlying device is still there, so delete it. This
1678 * happens when the ofproto is being destroyed, since the caller
1679 * assumes that removal of attached ports will happen as part of
1680 * destruction. */
1681 if (!port->tnl_port) {
1682 dpif_port_del(ofproto->backer->dpif, port->odp_port);
1683 }
1684 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1685 }
1686
b9ad7294 1687 if (port->odp_port != OVSP_NONE && !port->tnl_port) {
1688 hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
1689 }
1690
b9ad7294 1691 tnl_port_del(port->tnl_port);
02f8d646 1692 sset_find_and_delete(&ofproto->ports, devname);
0a740f48 1693 sset_find_and_delete(&ofproto->ghost_ports, devname);
2cc3c58e 1694 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af 1695 bundle_remove(port_);
a5610457 1696 set_cfm(port_, NULL);
abe529af 1697 if (ofproto->sflow) {
bae473fe 1698 dpif_sflow_del_port(ofproto->sflow, port->odp_port);
abe529af 1699 }
1700
1701 ofport_clear_priorities(port);
1702 hmap_destroy(&port->priorities);
1703}
1704
1705static void
1706port_modified(struct ofport *port_)
1707{
1708 struct ofport_dpif *port = ofport_dpif_cast(port_);
1709
1710 if (port->bundle && port->bundle->bond) {
1711 bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
1712 }
1713}
1714
1715static void
9e1fd49b 1716port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
1717{
1718 struct ofport_dpif *port = ofport_dpif_cast(port_);
1719 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
9e1fd49b 1720 enum ofputil_port_config changed = old_config ^ port->up.pp.config;
abe529af 1721
9e1fd49b 1722 if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
1723 OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
1724 OFPUTIL_PC_NO_PACKET_IN)) {
2cc3c58e 1725 ofproto->backer->need_revalidate = REV_RECONFIGURE;
7bde8dd8 1726
9e1fd49b 1727 if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
1728 bundle_update(port->bundle);
1729 }
1730 }
1731}
1732
1733static int
1734set_sflow(struct ofproto *ofproto_,
1735 const struct ofproto_sflow_options *sflow_options)
1736{
1737 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
bae473fe 1738 struct dpif_sflow *ds = ofproto->sflow;
6ff686f2 1739
abe529af 1740 if (sflow_options) {
bae473fe 1741 if (!ds) {
1742 struct ofport_dpif *ofport;
1743
4213f19d 1744 ds = ofproto->sflow = dpif_sflow_create();
abe529af 1745 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
e1b1d06a 1746 dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
abe529af 1747 }
2cc3c58e 1748 ofproto->backer->need_revalidate = REV_RECONFIGURE;
abe529af 1749 }
bae473fe 1750 dpif_sflow_set_options(ds, sflow_options);
abe529af 1751 } else {
1752 if (ds) {
1753 dpif_sflow_destroy(ds);
2cc3c58e 1754 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1755 ofproto->sflow = NULL;
1756 }
1757 }
1758 return 0;
1759}
1760
1761static int
a5610457 1762set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
1763{
1764 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1765 int error;
1766
a5610457 1767 if (!s) {
1768 error = 0;
1769 } else {
1770 if (!ofport->cfm) {
1771 struct ofproto_dpif *ofproto;
1772
1773 ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2cc3c58e 1774 ofproto->backer->need_revalidate = REV_RECONFIGURE;
6f629657 1775 ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
1776 }
1777
a5610457 1778 if (cfm_configure(ofport->cfm, s)) {
1779 return 0;
1780 }
1781
1782 error = EINVAL;
1783 }
1784 cfm_destroy(ofport->cfm);
1785 ofport->cfm = NULL;
1786 return error;
1787}
1788
1789static int
a5610457 1790get_cfm_fault(const struct ofport *ofport_)
1791{
1792 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1793
1794 return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
abe529af 1795}
1de11730 1796
1797static int
1798get_cfm_opup(const struct ofport *ofport_)
1799{
1800 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1801
1802 return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
1803}
1804
1805static int
1806get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
1807 size_t *n_rmps)
1808{
1809 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1810
1811 if (ofport->cfm) {
1812 cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
1813 return 0;
1814 } else {
1815 return -1;
1816 }
1817}
1818
1819static int
1820get_cfm_health(const struct ofport *ofport_)
1821{
1822 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1823
1824 return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
1825}
abe529af 1826\f
1827/* Spanning Tree. */
1828
1829static void
1830send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
1831{
1832 struct ofproto_dpif *ofproto = ofproto_;
1833 struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
1834 struct ofport_dpif *ofport;
1835
1836 ofport = stp_port_get_aux(sp);
1837 if (!ofport) {
1838 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
1839 ofproto->up.name, port_num);
1840 } else {
1841 struct eth_header *eth = pkt->l2;
1842
1843 netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
1844 if (eth_addr_is_zero(eth->eth_src)) {
1845 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
1846 "with unknown MAC", ofproto->up.name, port_num);
1847 } else {
97d6520b 1848 send_packet(ofport, pkt);
1849 }
1850 }
1851 ofpbuf_delete(pkt);
1852}
1853
1854/* Configures STP on 'ofproto_' using the settings defined in 's'. */
1855static int
1856set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
1857{
1858 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1859
1860 /* Only revalidate flows if the configuration changed. */
1861 if (!s != !ofproto->stp) {
2cc3c58e 1862 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1863 }
1864
1865 if (s) {
1866 if (!ofproto->stp) {
1867 ofproto->stp = stp_create(ofproto_->name, s->system_id,
1868 send_bpdu_cb, ofproto);
1869 ofproto->stp_last_tick = time_msec();
1870 }
1871
1872 stp_set_bridge_id(ofproto->stp, s->system_id);
1873 stp_set_bridge_priority(ofproto->stp, s->priority);
1874 stp_set_hello_time(ofproto->stp, s->hello_time);
1875 stp_set_max_age(ofproto->stp, s->max_age);
1876 stp_set_forward_delay(ofproto->stp, s->fwd_delay);
1877 } else {
1878 struct ofport *ofport;
1879
1880 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
1881 set_stp_port(ofport, NULL);
1882 }
1883
1884 stp_destroy(ofproto->stp);
1885 ofproto->stp = NULL;
1886 }
1887
1888 return 0;
1889}
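/* Editor's note (not part of the original file): the "!s != !ofproto->stp"
 * test in set_stp() above is a C idiom for logical XOR on pointers.  It is
 * true exactly when one operand is null and the other is not, i.e. when
 * STP is being turned on or off, which is the only case that requires flow
 * revalidation.  A standalone restatement: */
#include <stdbool.h>

static bool
presence_changed(const void *new_cfg, const void *old_state)
{
    return !new_cfg != !old_state;  /* True only on enable/disable edges. */
}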
1890
1891static int
1892get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
1893{
1894 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1895
1896 if (ofproto->stp) {
1897 s->enabled = true;
1898 s->bridge_id = stp_get_bridge_id(ofproto->stp);
1899 s->designated_root = stp_get_designated_root(ofproto->stp);
1900 s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
1901 } else {
1902 s->enabled = false;
1903 }
1904
1905 return 0;
1906}
1907
1908static void
1909update_stp_port_state(struct ofport_dpif *ofport)
1910{
1911 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1912 enum stp_state state;
1913
1914 /* Figure out new state. */
1915 state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
1916 : STP_DISABLED;
1917
1918 /* Update state. */
1919 if (ofport->stp_state != state) {
9e1fd49b 1920 enum ofputil_port_state of_state;
1921 bool fwd_change;
1922
1923 VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
1924 netdev_get_name(ofport->up.netdev),
1925 stp_state_name(ofport->stp_state),
1926 stp_state_name(state));
1927 if (stp_learn_in_state(ofport->stp_state)
1928 != stp_learn_in_state(state)) {
1929 /* XXX Learning action flows should also be flushed. */
1930 mac_learning_flush(ofproto->ml,
1931 &ofproto->backer->revalidate_set);
1932 }
1933 fwd_change = stp_forward_in_state(ofport->stp_state)
1934 != stp_forward_in_state(state);
1935
2cc3c58e 1936 ofproto->backer->need_revalidate = REV_STP;
1937 ofport->stp_state = state;
1938 ofport->stp_state_entered = time_msec();
1939
b308140a 1940 if (fwd_change && ofport->bundle) {
1941 bundle_update(ofport->bundle);
1942 }
1943
1944 /* Update the STP state bits in the OpenFlow port description. */
1945 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
1946 of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
1947 : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
1948 : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
1949 : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
1950 : 0);
1951 ofproto_port_set_state(&ofport->up, of_state);
1952 }
1953}
1954
1955/* Configures STP on 'ofport_' using the settings defined in 's'. The
1956 * caller is responsible for assigning STP port numbers and ensuring
1957 * there are no duplicates. */
1958static int
1959set_stp_port(struct ofport *ofport_,
1960 const struct ofproto_port_stp_settings *s)
1961{
1962 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1963 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1964 struct stp_port *sp = ofport->stp_port;
1965
1966 if (!s || !s->enable) {
1967 if (sp) {
1968 ofport->stp_port = NULL;
1969 stp_port_disable(sp);
ecd12731 1970 update_stp_port_state(ofport);
1971 }
1972 return 0;
1973 } else if (sp && stp_port_no(sp) != s->port_num
1974 && ofport == stp_port_get_aux(sp)) {
1975 /* The port-id changed, so disable the old one if it's not
1976 * already in use by another port. */
1977 stp_port_disable(sp);
1978 }
1979
1980 sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
1981 stp_port_enable(sp);
1982
1983 stp_port_set_aux(sp, ofport);
1984 stp_port_set_priority(sp, s->priority);
1985 stp_port_set_path_cost(sp, s->path_cost);
1986
1987 update_stp_port_state(ofport);
1988
1989 return 0;
1990}
1991
1992static int
1993get_stp_port_status(struct ofport *ofport_,
1994 struct ofproto_port_stp_status *s)
1995{
1996 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1997 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1998 struct stp_port *sp = ofport->stp_port;
1999
2000 if (!ofproto->stp || !sp) {
2001 s->enabled = false;
2002 return 0;
2003 }
2004
2005 s->enabled = true;
2006 s->port_id = stp_port_get_id(sp);
2007 s->state = stp_port_get_state(sp);
2008 s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
2009 s->role = stp_port_get_role(sp);
80740385 2010 stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
2011
2012 return 0;
2013}
2014
2015static void
2016stp_run(struct ofproto_dpif *ofproto)
2017{
2018 if (ofproto->stp) {
2019 long long int now = time_msec();
2020 long long int elapsed = now - ofproto->stp_last_tick;
2021 struct stp_port *sp;
2022
2023 if (elapsed > 0) {
2024 stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
2025 ofproto->stp_last_tick = now;
2026 }
2027 while (stp_get_changed_port(ofproto->stp, &sp)) {
2028 struct ofport_dpif *ofport = stp_port_get_aux(sp);
2029
2030 if (ofport) {
2031 update_stp_port_state(ofport);
2032 }
2033 }
2034
2035 if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
2cc3c58e 2036 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
6ae50723 2037 }
2038 }
2039}
2040
2041static void
2042stp_wait(struct ofproto_dpif *ofproto)
2043{
2044 if (ofproto->stp) {
2045 poll_timer_wait(1000);
2046 }
2047}
2048
2049/* Returns true if STP should process 'flow'. */
2050static bool
2051stp_should_process_flow(const struct flow *flow)
2052{
2053 return eth_addr_equals(flow->dl_dst, eth_addr_stp);
2054}
2055
2056static void
2057stp_process_packet(const struct ofport_dpif *ofport,
2058 const struct ofpbuf *packet)
2059{
2060 struct ofpbuf payload = *packet;
2061 struct eth_header *eth = payload.data;
2062 struct stp_port *sp = ofport->stp_port;
2063
2064 /* Sink packets on ports that have STP disabled when the bridge has
2065 * STP enabled. */
2066 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
2067 return;
2068 }
2069
2070 /* Trim off padding on payload. */
2071 if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
2072 payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
2073 }
2074
2075 if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
2076 stp_received_bpdu(sp, payload.data, payload.size);
2077 }
2078}
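/* Editor's sketch (standalone, not upstream code): the trimming rule used
 * in stp_process_packet() above.  BPDUs are 802.3 frames, so "eth_type"
 * actually holds the payload length; anything beyond header + length is
 * padding added to reach the Ethernet minimum frame size.  14 is assumed
 * here as the untagged Ethernet header length, matching OVS's
 * ETH_HEADER_LEN. */
#include <stddef.h>

static size_t
trim_dot3_padding(size_t frame_size, size_t dot3_payload_len)
{
    size_t real_size = 14 + dot3_payload_len;   /* Header plus payload. */

    return frame_size > real_size ? real_size : frame_size;
}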
2079\f
2080static struct priority_to_dscp *
2081get_priority(const struct ofport_dpif *ofport, uint32_t priority)
2082{
2083 struct priority_to_dscp *pdscp;
2084 uint32_t hash;
2085
2086 hash = hash_int(priority, 0);
2087 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
2088 if (pdscp->priority == priority) {
2089 return pdscp;
2090 }
2091 }
2092 return NULL;
2093}
2094
2095static void
2096ofport_clear_priorities(struct ofport_dpif *ofport)
2097{
2098 struct priority_to_dscp *pdscp, *next;
2099
2100 HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
2101 hmap_remove(&ofport->priorities, &pdscp->hmap_node);
2102 free(pdscp);
2103 }
2104}
2105
2106static int
2107set_queues(struct ofport *ofport_,
2108 const struct ofproto_port_queue *qdscp_list,
2109 size_t n_qdscp)
2110{
2111 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2112 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2113 struct hmap new = HMAP_INITIALIZER(&new);
2114 size_t i;
2115
2116 for (i = 0; i < n_qdscp; i++) {
2117 struct priority_to_dscp *pdscp;
2118 uint32_t priority;
2119 uint8_t dscp;
2120
2121 dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
acf60855 2122 if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
2123 &priority)) {
2124 continue;
2125 }
2126
2127 pdscp = get_priority(ofport, priority);
2128 if (pdscp) {
2129 hmap_remove(&ofport->priorities, &pdscp->hmap_node);
2130 } else {
2131 pdscp = xmalloc(sizeof *pdscp);
2132 pdscp->priority = priority;
2133 pdscp->dscp = dscp;
2cc3c58e 2134 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2135 }
2136
2137 if (pdscp->dscp != dscp) {
2138 pdscp->dscp = dscp;
2cc3c58e 2139 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2140 }
2141
2142 hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
2143 }
2144
2145 if (!hmap_is_empty(&ofport->priorities)) {
2146 ofport_clear_priorities(ofport);
2cc3c58e 2147 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2148 }
2149
2150 hmap_swap(&new, &ofport->priorities);
2151 hmap_destroy(&new);
2152
2153 return 0;
2154}
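/* Editor's sketch (not upstream code): the DSCP packing used by
 * set_queues() above.  DSCP is a 6-bit value stored in the upper six bits
 * of the IP TOS byte, hence the shift by two; 0xfc is the value of
 * IP_DSCP_MASK in OVS. */
#include <stdint.h>

static uint8_t
dscp_to_tos_bits(uint8_t dscp)
{
    return (uint8_t) ((dscp << 2) & 0xfc);   /* DSCP 46 (EF) -> 0xb8. */
}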
2155\f
2156/* Bundles. */
2157
2158/* Expires all MAC learning entries associated with 'bundle' and forces its
2159 * ofproto to revalidate every flow.
2160 *
2161 * Normally MAC learning entries are removed only from the ofproto associated
2162 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
2163 * are removed from every ofproto. When patch ports and SLB bonds are in use
2164 * and a VM migrates but its gratuitous ARPs are somehow lost, this avoids a
2165 * MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate with the
2166 * host from which it migrated. */
abe529af 2167static void
b44a10b7 2168bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
2169{
2170 struct ofproto_dpif *ofproto = bundle->ofproto;
2171 struct mac_learning *ml = ofproto->ml;
2172 struct mac_entry *mac, *next_mac;
2173
2cc3c58e 2174 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2175 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2176 if (mac->port.p == bundle) {
2177 if (all_ofprotos) {
2178 struct ofproto_dpif *o;
2179
2180 HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2181 if (o != ofproto) {
2182 struct mac_entry *e;
2183
2184 e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
2185 NULL);
2186 if (e) {
2187 mac_learning_expire(o->ml, e);
2188 }
2189 }
2190 }
2191 }
2192
2193 mac_learning_expire(ml, mac);
2194 }
2195 }
2196}
2197
2198static struct ofbundle *
2199bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
2200{
2201 struct ofbundle *bundle;
2202
2203 HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
2204 &ofproto->bundles) {
2205 if (bundle->aux == aux) {
2206 return bundle;
2207 }
2208 }
2209 return NULL;
2210}
2211
2212/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
2213 * ones that are found to 'bundles'. */
2214static void
2215bundle_lookup_multiple(struct ofproto_dpif *ofproto,
2216 void **auxes, size_t n_auxes,
2217 struct hmapx *bundles)
2218{
2219 size_t i;
2220
2221 hmapx_init(bundles);
2222 for (i = 0; i < n_auxes; i++) {
2223 struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
2224 if (bundle) {
2225 hmapx_add(bundles, bundle);
2226 }
2227 }
2228}
2229
2230static void
2231bundle_update(struct ofbundle *bundle)
2232{
2233 struct ofport_dpif *port;
2234
2235 bundle->floodable = true;
2236 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2237 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2238 || !stp_forward_in_state(port->stp_state)) {
2239 bundle->floodable = false;
2240 break;
2241 }
2242 }
2243}
2244
2245static void
2246bundle_del_port(struct ofport_dpif *port)
2247{
2248 struct ofbundle *bundle = port->bundle;
2249
2cc3c58e 2250 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
6f77f4ae 2251
2252 list_remove(&port->bundle_node);
2253 port->bundle = NULL;
2254
2255 if (bundle->lacp) {
2256 lacp_slave_unregister(bundle->lacp, port);
2257 }
2258 if (bundle->bond) {
2259 bond_slave_unregister(bundle->bond, port);
2260 }
2261
7bde8dd8 2262 bundle_update(bundle);
2263}
2264
2265static bool
2266bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
df53d41c 2267 struct lacp_slave_settings *lacp)
2268{
2269 struct ofport_dpif *port;
2270
2271 port = get_ofp_port(bundle->ofproto, ofp_port);
2272 if (!port) {
2273 return false;
2274 }
2275
2276 if (port->bundle != bundle) {
2cc3c58e 2277 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2278 if (port->bundle) {
2279 bundle_del_port(port);
2280 }
2281
2282 port->bundle = bundle;
2283 list_push_back(&bundle->ports, &port->bundle_node);
2284 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2285 || !stp_forward_in_state(port->stp_state)) {
2286 bundle->floodable = false;
2287 }
2288 }
2289 if (lacp) {
2cc3c58e 2290 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2291 lacp_slave_register(bundle->lacp, port, lacp);
2292 }
2293
2294 return true;
2295}
2296
2297static void
2298bundle_destroy(struct ofbundle *bundle)
2299{
2300 struct ofproto_dpif *ofproto;
2301 struct ofport_dpif *port, *next_port;
2302 int i;
2303
2304 if (!bundle) {
2305 return;
2306 }
2307
2308 ofproto = bundle->ofproto;
2309 for (i = 0; i < MAX_MIRRORS; i++) {
2310 struct ofmirror *m = ofproto->mirrors[i];
2311 if (m) {
2312 if (m->out == bundle) {
2313 mirror_destroy(m);
2314 } else if (hmapx_find_and_delete(&m->srcs, bundle)
2315 || hmapx_find_and_delete(&m->dsts, bundle)) {
2cc3c58e 2316 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2317 }
2318 }
2319 }
2320
2321 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2322 bundle_del_port(port);
2323 }
2324
b44a10b7 2325 bundle_flush_macs(bundle, true);
2326 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
2327 free(bundle->name);
2328 free(bundle->trunks);
2329 lacp_destroy(bundle->lacp);
2330 bond_destroy(bundle->bond);
2331 free(bundle);
2332}
2333
2334static int
2335bundle_set(struct ofproto *ofproto_, void *aux,
2336 const struct ofproto_bundle_settings *s)
2337{
2338 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2339 bool need_flush = false;
2340 struct ofport_dpif *port;
2341 struct ofbundle *bundle;
2342 unsigned long *trunks;
2343 int vlan;
2344 size_t i;
2345 bool ok;
2346
2347 if (!s) {
2348 bundle_destroy(bundle_lookup(ofproto, aux));
2349 return 0;
2350 }
2351
2352 ovs_assert(s->n_slaves == 1 || s->bond != NULL);
2353 ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
2354
2355 bundle = bundle_lookup(ofproto, aux);
2356 if (!bundle) {
2357 bundle = xmalloc(sizeof *bundle);
2358
2359 bundle->ofproto = ofproto;
2360 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
2361 hash_pointer(aux, 0));
2362 bundle->aux = aux;
2363 bundle->name = NULL;
2364
2365 list_init(&bundle->ports);
ecac4ebf 2366 bundle->vlan_mode = PORT_VLAN_TRUNK;
2367 bundle->vlan = -1;
2368 bundle->trunks = NULL;
5e9ceccd 2369 bundle->use_priority_tags = s->use_priority_tags;
2370 bundle->lacp = NULL;
2371 bundle->bond = NULL;
2372
2373 bundle->floodable = true;
2374
2375 bundle->src_mirrors = 0;
2376 bundle->dst_mirrors = 0;
2377 bundle->mirror_out = 0;
2378 }
2379
2380 if (!bundle->name || strcmp(s->name, bundle->name)) {
2381 free(bundle->name);
2382 bundle->name = xstrdup(s->name);
2383 }
2384
2385 /* LACP. */
2386 if (s->lacp) {
2387 if (!bundle->lacp) {
2cc3c58e 2388 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2389 bundle->lacp = lacp_create();
2390 }
2391 lacp_configure(bundle->lacp, s->lacp);
2392 } else {
2393 lacp_destroy(bundle->lacp);
2394 bundle->lacp = NULL;
2395 }
2396
2397 /* Update set of ports. */
2398 ok = true;
2399 for (i = 0; i < s->n_slaves; i++) {
2400 if (!bundle_add_port(bundle, s->slaves[i],
df53d41c 2401 s->lacp ? &s->lacp_slaves[i] : NULL)) {
2402 ok = false;
2403 }
2404 }
2405 if (!ok || list_size(&bundle->ports) != s->n_slaves) {
2406 struct ofport_dpif *next_port;
2407
2408 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2409 for (i = 0; i < s->n_slaves; i++) {
56c769ab 2410 if (s->slaves[i] == port->up.ofp_port) {
2411 goto found;
2412 }
2413 }
2414
2415 bundle_del_port(port);
2416 found: ;
2417 }
2418 }
cb22974d 2419 ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
2420
2421 if (list_is_empty(&bundle->ports)) {
2422 bundle_destroy(bundle);
2423 return EINVAL;
2424 }
2425
ecac4ebf 2426 /* Set VLAN tagging mode. */
2427 if (s->vlan_mode != bundle->vlan_mode
2428 || s->use_priority_tags != bundle->use_priority_tags) {
ecac4ebf 2429 bundle->vlan_mode = s->vlan_mode;
5e9ceccd 2430 bundle->use_priority_tags = s->use_priority_tags;
2431 need_flush = true;
2432 }
2433
abe529af 2434 /* Set VLAN tag. */
2435 vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
2436 : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
2437 : 0);
2438 if (vlan != bundle->vlan) {
2439 bundle->vlan = vlan;
2440 need_flush = true;
2441 }
2442
2443 /* Get trunked VLANs. */
2444 switch (s->vlan_mode) {
2445 case PORT_VLAN_ACCESS:
2446 trunks = NULL;
2447 break;
2448
2449 case PORT_VLAN_TRUNK:
ebc56baa 2450 trunks = CONST_CAST(unsigned long *, s->trunks);
2451 break;
2452
2453 case PORT_VLAN_NATIVE_UNTAGGED:
2454 case PORT_VLAN_NATIVE_TAGGED:
2455 if (vlan != 0 && (!s->trunks
2456 || !bitmap_is_set(s->trunks, vlan)
2457 || bitmap_is_set(s->trunks, 0))) {
2458 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2459 if (s->trunks) {
2460 trunks = bitmap_clone(s->trunks, 4096);
2461 } else {
2462 trunks = bitmap_allocate1(4096);
2463 }
2464 bitmap_set1(trunks, vlan);
2465 bitmap_set0(trunks, 0);
2466 } else {
ebc56baa 2467 trunks = CONST_CAST(unsigned long *, s->trunks);
2468 }
2469 break;
2470
2471 default:
2472 NOT_REACHED();
2473 }
2474 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
2475 free(bundle->trunks);
2476 if (trunks == s->trunks) {
2477 bundle->trunks = vlan_bitmap_clone(trunks);
2478 } else {
2479 bundle->trunks = trunks;
2480 trunks = NULL;
2481 }
2482 need_flush = true;
2483 }
2484 if (trunks != s->trunks) {
2485 free(trunks);
2486 }
2487
2488 /* Bonding. */
2489 if (!list_is_short(&bundle->ports)) {
2490 bundle->ofproto->has_bonded_bundles = true;
2491 if (bundle->bond) {
2492 if (bond_reconfigure(bundle->bond, s->bond)) {
2cc3c58e 2493 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2494 }
2495 } else {
2496 bundle->bond = bond_create(s->bond);
2cc3c58e 2497 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2498 }
2499
2500 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
df53d41c 2501 bond_slave_register(bundle->bond, port, port->up.netdev);
2502 }
2503 } else {
2504 bond_destroy(bundle->bond);
2505 bundle->bond = NULL;
2506 }
2507
2508 /* If we changed something that would affect MAC learning, un-learn
2509 * everything on this port and force flow revalidation. */
2510 if (need_flush) {
b44a10b7 2511 bundle_flush_macs(bundle, false);
2512 }
2513
2514 return 0;
2515}
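/* Editor's sketch (standalone, not upstream code): the native-VLAN rule
 * applied by bundle_set() above, restated over a plain boolean array
 * instead of OVS's bitmap type.  In native-tagged/untagged modes the
 * native VLAN must always be trunked and VLAN 0 never may be, so the
 * configured trunk set is patched whenever it violates either rule. */
#include <stdbool.h>

static void
force_native_vlan(bool trunks[4096], int native_vlan)
{
    if (native_vlan != 0 && (!trunks[native_vlan] || trunks[0])) {
        trunks[native_vlan] = true;   /* Force trunking the native VLAN. */
        trunks[0] = false;            /* Prohibit trunking VLAN 0. */
    }
}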
2516
2517static void
2518bundle_remove(struct ofport *port_)
2519{
2520 struct ofport_dpif *port = ofport_dpif_cast(port_);
2521 struct ofbundle *bundle = port->bundle;
2522
2523 if (bundle) {
2524 bundle_del_port(port);
2525 if (list_is_empty(&bundle->ports)) {
2526 bundle_destroy(bundle);
2527 } else if (list_is_short(&bundle->ports)) {
2528 bond_destroy(bundle->bond);
2529 bundle->bond = NULL;
2530 }
2531 }
2532}
2533
2534static void
5f877369 2535send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
2536{
2537 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
2538 struct ofport_dpif *port = port_;
2539 uint8_t ea[ETH_ADDR_LEN];
2540 int error;
2541
2542 error = netdev_get_etheraddr(port->up.netdev, ea);
2543 if (!error) {
abe529af 2544 struct ofpbuf packet;
5f877369 2545 void *packet_pdu;
2546
2547 ofpbuf_init(&packet, 0);
2548 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
2549 pdu_size);
2550 memcpy(packet_pdu, pdu, pdu_size);
2551
97d6520b 2552 send_packet(port, &packet);
2553 ofpbuf_uninit(&packet);
2554 } else {
2555 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
2556 "%s (%s)", port->bundle->name,
2557 netdev_get_name(port->up.netdev), strerror(error));
2558 }
2559}
2560
2561static void
2562bundle_send_learning_packets(struct ofbundle *bundle)
2563{
2564 struct ofproto_dpif *ofproto = bundle->ofproto;
2565 int error, n_packets, n_errors;
2566 struct mac_entry *e;
2567
2568 error = n_packets = n_errors = 0;
2569 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
2570 if (e->port.p != bundle) {
2571 struct ofpbuf *learning_packet;
2572 struct ofport_dpif *port;
4dd1e3ca 2573 void *port_void;
2574 int ret;
2575
2576 /* The assignment to "port" is unnecessary but makes "grep"ing for
2577 * struct ofport_dpif more effective. */
2578 learning_packet = bond_compose_learning_packet(bundle->bond,
2579 e->mac, e->vlan,
2580 &port_void);
2581 port = port_void;
97d6520b 2582 ret = send_packet(port, learning_packet);
ea131871 2583 ofpbuf_delete(learning_packet);
2584 if (ret) {
2585 error = ret;
2586 n_errors++;
2587 }
2588 n_packets++;
2589 }
2590 }
2591
2592 if (n_errors) {
2593 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2594 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2595 "packets, last error was: %s",
2596 bundle->name, n_errors, n_packets, strerror(error));
2597 } else {
2598 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2599 bundle->name, n_packets);
2600 }
2601}
2602
2603static void
2604bundle_run(struct ofbundle *bundle)
2605{
2606 if (bundle->lacp) {
2607 lacp_run(bundle->lacp, send_pdu_cb);
2608 }
2609 if (bundle->bond) {
2610 struct ofport_dpif *port;
2611
2612 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
015e08bc 2613 bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
2614 }
2615
2cc3c58e 2616 bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
bdebeece 2617 lacp_status(bundle->lacp));
2618 if (bond_should_send_learning_packets(bundle->bond)) {
2619 bundle_send_learning_packets(bundle);
2620 }
2621 }
2622}
2623
2624static void
2625bundle_wait(struct ofbundle *bundle)
2626{
2627 if (bundle->lacp) {
2628 lacp_wait(bundle->lacp);
2629 }
2630 if (bundle->bond) {
2631 bond_wait(bundle->bond);
2632 }
2633}
2634\f
2635/* Mirrors. */
2636
2637static int
2638mirror_scan(struct ofproto_dpif *ofproto)
2639{
2640 int idx;
2641
2642 for (idx = 0; idx < MAX_MIRRORS; idx++) {
2643 if (!ofproto->mirrors[idx]) {
2644 return idx;
2645 }
2646 }
2647 return -1;
2648}
2649
2650static struct ofmirror *
2651mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
2652{
2653 int i;
2654
2655 for (i = 0; i < MAX_MIRRORS; i++) {
2656 struct ofmirror *mirror = ofproto->mirrors[i];
2657 if (mirror && mirror->aux == aux) {
2658 return mirror;
2659 }
2660 }
2661
2662 return NULL;
2663}
2664
2665/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
2666static void
2667mirror_update_dups(struct ofproto_dpif *ofproto)
2668{
2669 int i;
2670
2671 for (i = 0; i < MAX_MIRRORS; i++) {
2672 struct ofmirror *m = ofproto->mirrors[i];
2673
2674 if (m) {
2675 m->dup_mirrors = MIRROR_MASK_C(1) << i;
2676 }
2677 }
2678
2679 for (i = 0; i < MAX_MIRRORS; i++) {
2680 struct ofmirror *m1 = ofproto->mirrors[i];
2681 int j;
2682
2683 if (!m1) {
2684 continue;
2685 }
2686
2687 for (j = i + 1; j < MAX_MIRRORS; j++) {
2688 struct ofmirror *m2 = ofproto->mirrors[j];
2689
edb0540b 2690 if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
2691 m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
2692 m2->dup_mirrors |= m1->dup_mirrors;
2693 }
2694 }
2695 }
2696}
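/* Editor's sketch (standalone, not upstream code): what the dup_mirrors
 * masks computed above mean.  If mirrors 0 and 2 share an output bundle or
 * output VLAN, each one's mask ends up containing both bits, so a packet
 * already mirrored by one is recognized as a duplicate by the other. */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint32_t dup0 = UINT32_C(1) << 0;   /* Mirror 0 starts as just itself. */
    uint32_t dup2 = UINT32_C(1) << 2;   /* Mirror 2 starts as just itself. */

    dup0 |= UINT32_C(1) << 2;           /* Mirrors 0 and 2 share an output, */
    dup2 |= dup0;                       /* so they end up sharing one mask. */

    printf("%#" PRIx32 " %#" PRIx32 "\n", dup0, dup2);   /* "0x5 0x5" */
    return 0;
}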
2697
2698static int
2699mirror_set(struct ofproto *ofproto_, void *aux,
2700 const struct ofproto_mirror_settings *s)
2701{
2702 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2703 mirror_mask_t mirror_bit;
2704 struct ofbundle *bundle;
2705 struct ofmirror *mirror;
2706 struct ofbundle *out;
2707 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
2708 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
2709 int out_vlan;
2710
2711 mirror = mirror_lookup(ofproto, aux);
2712 if (!s) {
2713 mirror_destroy(mirror);
2714 return 0;
2715 }
2716 if (!mirror) {
2717 int idx;
2718
2719 idx = mirror_scan(ofproto);
2720 if (idx < 0) {
2721 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
2722 "cannot create %s",
2723 ofproto->up.name, MAX_MIRRORS, s->name);
2724 return EFBIG;
2725 }
2726
2727 mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
2728 mirror->ofproto = ofproto;
2729 mirror->idx = idx;
8b28d864 2730 mirror->aux = aux;
2731 mirror->out_vlan = -1;
2732 mirror->name = NULL;
2733 }
2734
2735 if (!mirror->name || strcmp(s->name, mirror->name)) {
2736 free(mirror->name);
2737 mirror->name = xstrdup(s->name);
2738 }
2739
2740 /* Get the new configuration. */
2741 if (s->out_bundle) {
2742 out = bundle_lookup(ofproto, s->out_bundle);
2743 if (!out) {
2744 mirror_destroy(mirror);
2745 return EINVAL;
2746 }
2747 out_vlan = -1;
2748 } else {
2749 out = NULL;
2750 out_vlan = s->out_vlan;
2751 }
2752 bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
2753 bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);
2754
2755 /* If the configuration has not changed, do nothing. */
2756 if (hmapx_equals(&srcs, &mirror->srcs)
2757 && hmapx_equals(&dsts, &mirror->dsts)
2758 && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
2759 && mirror->out == out
2760 && mirror->out_vlan == out_vlan)
2761 {
2762 hmapx_destroy(&srcs);
2763 hmapx_destroy(&dsts);
2764 return 0;
2765 }
2766
2767 hmapx_swap(&srcs, &mirror->srcs);
2768 hmapx_destroy(&srcs);
2769
2770 hmapx_swap(&dsts, &mirror->dsts);
2771 hmapx_destroy(&dsts);
2772
2773 free(mirror->vlans);
2774 mirror->vlans = vlan_bitmap_clone(s->src_vlans);
2775
2776 mirror->out = out;
2777 mirror->out_vlan = out_vlan;
2778
2779 /* Update bundles. */
2780 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2781 HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
2782 if (hmapx_contains(&mirror->srcs, bundle)) {
2783 bundle->src_mirrors |= mirror_bit;
2784 } else {
2785 bundle->src_mirrors &= ~mirror_bit;
2786 }
2787
2788 if (hmapx_contains(&mirror->dsts, bundle)) {
2789 bundle->dst_mirrors |= mirror_bit;
2790 } else {
2791 bundle->dst_mirrors &= ~mirror_bit;
2792 }
2793
2794 if (mirror->out == bundle) {
2795 bundle->mirror_out |= mirror_bit;
2796 } else {
2797 bundle->mirror_out &= ~mirror_bit;
2798 }
2799 }
2800
2cc3c58e 2801 ofproto->backer->need_revalidate = REV_RECONFIGURE;
ccb7c863 2802 ofproto->has_mirrors = true;
2803 mac_learning_flush(ofproto->ml,
2804 &ofproto->backer->revalidate_set);
9ba15e2a 2805 mirror_update_dups(ofproto);
2806
2807 return 0;
2808}
2809
2810static void
2811mirror_destroy(struct ofmirror *mirror)
2812{
2813 struct ofproto_dpif *ofproto;
2814 mirror_mask_t mirror_bit;
2815 struct ofbundle *bundle;
ccb7c863 2816 int i;
2817
2818 if (!mirror) {
2819 return;
2820 }
2821
2822 ofproto = mirror->ofproto;
2823 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2824 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
2825
2826 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2827 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
2828 bundle->src_mirrors &= ~mirror_bit;
2829 bundle->dst_mirrors &= ~mirror_bit;
2830 bundle->mirror_out &= ~mirror_bit;
2831 }
2832
2833 hmapx_destroy(&mirror->srcs);
2834 hmapx_destroy(&mirror->dsts);
2835 free(mirror->vlans);
2836
2837 ofproto->mirrors[mirror->idx] = NULL;
2838 free(mirror->name);
2839 free(mirror);
2840
2841 mirror_update_dups(ofproto);
2842
2843 ofproto->has_mirrors = false;
2844 for (i = 0; i < MAX_MIRRORS; i++) {
2845 if (ofproto->mirrors[i]) {
2846 ofproto->has_mirrors = true;
2847 break;
2848 }
2849 }
2850}
2851
2852static int
2853mirror_get_stats(struct ofproto *ofproto_, void *aux,
2854 uint64_t *packets, uint64_t *bytes)
2855{
2856 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2857 struct ofmirror *mirror = mirror_lookup(ofproto, aux);
2858
2859 if (!mirror) {
2860 *packets = *bytes = UINT64_MAX;
2861 return 0;
2862 }
2863
2864 *packets = mirror->packet_count;
2865 *bytes = mirror->byte_count;
2866
2867 return 0;
2868}
2869
2870static int
2871set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
2872{
2873 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2874 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
2cc3c58e 2875 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
2876 }
2877 return 0;
2878}
2879
2880static bool
b4affc74 2881is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
2882{
2883 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2884 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
2885 return bundle && bundle->mirror_out != 0;
2886}
2887
2888static void
b53055f4 2889forward_bpdu_changed(struct ofproto *ofproto_)
2890{
2891 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2cc3c58e 2892 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8402c74b 2893}
2894
2895static void
2896set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
2897 size_t max_entries)
2898{
2899 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2900 mac_learning_set_idle_time(ofproto->ml, idle_time);
c4069512 2901 mac_learning_set_max_entries(ofproto->ml, max_entries);
e764773c 2902}
2903\f
2904/* Ports. */
2905
2906static struct ofport_dpif *
4acbc98d 2907get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
abe529af 2908{
2909 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
2910 return ofport ? ofport_dpif_cast(ofport) : NULL;
2911}
2912
2913static struct ofport_dpif *
4acbc98d 2914get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
abe529af 2915{
2916 struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
2917 return port && &ofproto->up == port->up.ofproto ? port : NULL;
2918}
2919
2920static void
2921ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
2922 struct ofproto_port *ofproto_port,
2923 struct dpif_port *dpif_port)
2924{
2925 ofproto_port->name = dpif_port->name;
2926 ofproto_port->type = dpif_port->type;
e1b1d06a 2927 ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
2928}
2929
2930static struct ofport_dpif *
2931ofport_get_peer(const struct ofport_dpif *ofport_dpif)
2932{
2933 const struct ofproto_dpif *ofproto;
2934 const char *peer;
2935
2936 peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
2937 if (!peer) {
2938 return NULL;
2939 }
2940
2941 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2942 struct ofport *ofport;
2943
2944 ofport = shash_find_data(&ofproto->up.port_by_name, peer);
2945 if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
2946 return ofport_dpif_cast(ofport);
2947 }
2948 }
2949 return NULL;
2950}
2951
2952static void
2953port_run_fast(struct ofport_dpif *ofport)
2954{
2955 if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
2956 struct ofpbuf packet;
2957
2958 ofpbuf_init(&packet, 0);
2959 cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
2960 send_packet(ofport, &packet);
2961 ofpbuf_uninit(&packet);
2962 }
2963}
2964
2965static void
2966port_run(struct ofport_dpif *ofport)
2967{
2968 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
2969 bool carrier_changed = carrier_seq != ofport->carrier_seq;
2970 bool enable = netdev_get_carrier(ofport->up.netdev);
2971
2972 ofport->carrier_seq = carrier_seq;
2973
0aa66d6e 2974 port_run_fast(ofport);
2975
2976 if (ofport->tnl_port
2977 && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
2978 &ofport->tnl_port)) {
2979 ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = REV_RECONFIGURE;
2980 }
2981
abe529af 2982 if (ofport->cfm) {
2983 int cfm_opup = cfm_get_opup(ofport->cfm);
2984
abe529af 2985 cfm_run(ofport->cfm);
2986 enable = enable && !cfm_get_fault(ofport->cfm);
2987
2988 if (cfm_opup >= 0) {
2989 enable = enable && cfm_opup;
2990 }
abe529af 2991 }
2992
2993 if (ofport->bundle) {
2994 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
2995 if (carrier_changed) {
2996 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
2997 }
2998 }
2999
3000 if (ofport->may_enable != enable) {
3001 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3002
3003 if (ofproto->has_bundle_action) {
2cc3c58e 3004 ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
3005 }
3006 }
3007
015e08bc 3008 ofport->may_enable = enable;
3009}
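/* Editor's sketch (not upstream code): the health signals that port_run()
 * above folds into 'may_enable', restated as a pure function.  A negative
 * 'cfm_opup' means the CFM remote does not report an operational state and
 * is therefore ignored, matching the check in port_run(); ports without
 * CFM or a bundle simply pass true / their defaults here. */
#include <stdbool.h>

static bool
compute_may_enable(bool carrier, bool cfm_fault, int cfm_opup,
                   bool lacp_may_enable)
{
    bool enable = carrier && !cfm_fault;

    if (cfm_opup >= 0) {
        enable = enable && cfm_opup != 0;
    }
    return enable && lacp_may_enable;
}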
3010
3011static void
3012port_wait(struct ofport_dpif *ofport)
3013{
3014 if (ofport->cfm) {
3015 cfm_wait(ofport->cfm);
3016 }
3017}
3018
3019static int
3020port_query_by_name(const struct ofproto *ofproto_, const char *devname,
3021 struct ofproto_port *ofproto_port)
3022{
3023 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3024 struct dpif_port dpif_port;
3025 int error;
3026
3027 if (sset_contains(&ofproto->ghost_ports, devname)) {
3028 const char *type = netdev_get_type_from_name(devname);
3029
3030 /* We may be called before ofproto->up.port_by_name is populated with
3031 * the appropriate ofport. For this reason, we must get the name and
3032 * type from the netdev layer directly. */
3033 if (type) {
3034 const struct ofport *ofport;
3035
3036 ofport = shash_find_data(&ofproto->up.port_by_name, devname);
3037 ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
3038 ofproto_port->name = xstrdup(devname);
3039 ofproto_port->type = xstrdup(type);
3040 return 0;
3041 }
3042 return ENODEV;
3043 }
3044
3045 if (!sset_contains(&ofproto->ports, devname)) {
3046 return ENODEV;
3047 }
3048 error = dpif_port_query_by_name(ofproto->backer->dpif,
3049 devname, &dpif_port);
abe529af 3050 if (!error) {
e1b1d06a 3051 ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
3052 }
3053 return error;
3054}
3055
3056static int
e1b1d06a 3057port_add(struct ofproto *ofproto_, struct netdev *netdev)
3058{
3059 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3060 const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
3061 const char *devname = netdev_get_name(netdev);
abe529af 3062
3063 if (netdev_vport_is_patch(netdev)) {
3064 sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
3065 return 0;
3066 }
3067
b9ad7294 3068 if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
3069 uint32_t port_no = UINT32_MAX;
3070 int error;
3071
3072 error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
3073 if (error) {
3074 return error;
3075 }
3076 if (netdev_get_tunnel_config(netdev)) {
3077 simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
3078 }
acf60855 3079 }
3080
3081 if (netdev_get_tunnel_config(netdev)) {
3082 sset_add(&ofproto->ghost_ports, devname);
3083 } else {
3084 sset_add(&ofproto->ports, devname);
3085 }
3086 return 0;
3087}
3088
3089static int
3090port_del(struct ofproto *ofproto_, uint16_t ofp_port)
3091{
3092 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
b9ad7294 3093 struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
e1b1d06a 3094 int error = 0;
abe529af 3095
3096 if (!ofport) {
3097 return 0;
e1b1d06a 3098 }
3099
3100 sset_find_and_delete(&ofproto->ghost_ports,
3101 netdev_get_name(ofport->up.netdev));
3102 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3103 if (!ofport->tnl_port) {
3104 error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
3105 if (!error) {
3106 /* The caller is going to close ofport->up.netdev. If this is a
3107 * bonded port, then the bond is using that netdev, so remove it
3108 * from the bond. The client will need to reconfigure everything
3109 * after deleting ports, so then the slave will get re-added. */
3110 bundle_remove(&ofport->up);
3111 }
3112 }
3113 return error;
3114}
3115
3116static int
3117port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
3118{
3119 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3120 int error;
3121
3122 error = netdev_get_stats(ofport->up.netdev, stats);
3123
ee382d89 3124 if (!error && ofport_->ofp_port == OFPP_LOCAL) {
3125 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3126
3127 /* ofproto->stats.tx_packets represents packets that we created
3128 * internally and sent to some port (e.g. packets sent with
3129 * send_packet()). Account for them as if they had come from
3130 * OFPP_LOCAL and got forwarded. */
3131
3132 if (stats->rx_packets != UINT64_MAX) {
3133 stats->rx_packets += ofproto->stats.tx_packets;
3134 }
3135
3136 if (stats->rx_bytes != UINT64_MAX) {
3137 stats->rx_bytes += ofproto->stats.tx_bytes;
3138 }
3139
3140 /* ofproto->stats.rx_packets represents packets that were received on
3141 * some port and we processed internally and dropped (e.g. STP).
4e090bc7 3142 * Account for them as if they had been forwarded to OFPP_LOCAL. */
3143
3144 if (stats->tx_packets != UINT64_MAX) {
3145 stats->tx_packets += ofproto->stats.rx_packets;
3146 }
3147
3148 if (stats->tx_bytes != UINT64_MAX) {
3149 stats->tx_bytes += ofproto->stats.rx_bytes;
3150 }
3151 }
3152
3153 return error;
3154}
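/* Editor's sketch (not upstream code): the counter folding performed just
 * above, reduced to one helper.  UINT64_MAX is the "statistic not
 * supported" sentinel used by netdev stats, so such counters are left
 * untouched instead of being incremented past the sentinel. */
#include <stdint.h>

static void
fold_counter(uint64_t *counter, uint64_t internal_count)
{
    if (*counter != UINT64_MAX) {   /* Skip unsupported counters. */
        *counter += internal_count;
    }
}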
3155
3156/* Account packets for LOCAL port. */
3157static void
3158ofproto_update_local_port_stats(const struct ofproto *ofproto_,
3159 size_t tx_size, size_t rx_size)
3160{
3161 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3162
3163 if (rx_size) {
3164 ofproto->stats.rx_packets++;
3165 ofproto->stats.rx_bytes += rx_size;
3166 }
3167 if (tx_size) {
3168 ofproto->stats.tx_packets++;
3169 ofproto->stats.tx_bytes += tx_size;
3170 }
3171}
3172
abe529af 3173struct port_dump_state {
3174 uint32_t bucket;
3175 uint32_t offset;
0a740f48 3176 bool ghost;
3177
3178 struct ofproto_port port;
3179 bool has_port;
3180};
3181
3182static int
acf60855 3183port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
abe529af 3184{
0a740f48 3185 *statep = xzalloc(sizeof(struct port_dump_state));
3186 return 0;
3187}
3188
3189static int
b9ad7294 3190port_dump_next(const struct ofproto *ofproto_, void *state_,
3191 struct ofproto_port *port)
3192{
e1b1d06a 3193 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
abe529af 3194 struct port_dump_state *state = state_;
0a740f48 3195 const struct sset *sset;
acf60855 3196 struct sset_node *node;
abe529af 3197
3198 if (state->has_port) {
3199 ofproto_port_destroy(&state->port);
3200 state->has_port = false;
3201 }
3202 sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
3203 while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
3204 int error;
3205
3206 error = port_query_by_name(ofproto_, node->name, &state->port);
3207 if (!error) {
3208 *port = state->port;
3209 state->has_port = true;
3210 return 0;
3211 } else if (error != ENODEV) {
3212 return error;
3213 }
abe529af 3214 }
acf60855 3215
3216 if (!state->ghost) {
3217 state->ghost = true;
3218 state->bucket = 0;
3219 state->offset = 0;
3220 return port_dump_next(ofproto_, state_, port);
3221 }
3222
acf60855 3223 return EOF;
3224}
3225
3226static int
3227port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
3228{
3229 struct port_dump_state *state = state_;
3230
3231 if (state->has_port) {
3232 ofproto_port_destroy(&state->port);
3233 }
3234 free(state);
3235 return 0;
3236}
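/* Editor's sketch (standalone, not upstream code): the two-phase iteration
 * pattern implemented by port_dump_next() above, over two plain string
 * arrays instead of ssets.  The real datapath-backed ports are dumped
 * first; on exhaustion the position resets and the ghost (patch/tunnel)
 * ports follow, so one dump covers both sets. */
#include <stddef.h>

static const char *
two_phase_next(const char **real, size_t n_real,
               const char **ghost, size_t n_ghost,
               size_t *pos, int *phase)
{
    for (;;) {
        const char **set = *phase == 0 ? real : ghost;
        size_t n = *phase == 0 ? n_real : n_ghost;

        if (*pos < n) {
            return set[(*pos)++];   /* Next name in the current set. */
        } else if (*phase == 0) {
            *phase = 1;             /* Switch to the ghost set... */
            *pos = 0;               /* ...restarting from the beginning. */
        } else {
            return NULL;            /* True EOF. */
        }
    }
}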
3237
3238static int
3239port_poll(const struct ofproto *ofproto_, char **devnamep)
3240{
3241 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3242
3243 if (ofproto->port_poll_errno) {
3244 int error = ofproto->port_poll_errno;
3245 ofproto->port_poll_errno = 0;
3246 return error;
3247 }
3248
3249 if (sset_is_empty(&ofproto->port_poll_set)) {
3250 return EAGAIN;
3251 }
3252
3253 *devnamep = sset_pop(&ofproto->port_poll_set);
3254 return 0;
3255}
3256
3257static void
3258port_poll_wait(const struct ofproto *ofproto_)
3259{
3260 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855 3261 dpif_port_poll_wait(ofproto->backer->dpif);
3262}
3263
3264static int
3265port_is_lacp_current(const struct ofport *ofport_)
3266{
3267 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3268 return (ofport->bundle && ofport->bundle->lacp
3269 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
3270 : -1);
3271}
3272\f
3273/* Upcall handling. */
3274
3275/* Flow miss batching.
3276 *
3277 * Some dpifs implement operations faster when you hand them off in a batch.
3278 * To allow batching, "struct flow_miss" queues the dpif-related work needed
3279 * for a given flow. Each "struct flow_miss" corresponds to sending one or
3280 * more packets, plus possibly installing the flow in the dpif.
3281 *
3282 * So far we only batch the operations that affect flow setup time the most.
3283 * It's possible to batch more than that, but the benefit might be minimal. */
3284struct flow_miss {
3285 struct hmap_node hmap_node;
acf60855 3286 struct ofproto_dpif *ofproto;
501f8d1f 3287 struct flow flow;
b0f7b9b5 3288 enum odp_key_fitness key_fitness;
3289 const struct nlattr *key;
3290 size_t key_len;
e84173dc 3291 ovs_be16 initial_tci;
501f8d1f 3292 struct list packets;
6a7e895f 3293 enum dpif_upcall_type upcall_type;
a088a1ff 3294 uint32_t odp_in_port;
3295};
3296
3297struct flow_miss_op {
c2b565b5 3298 struct dpif_op dpif_op;
3299 struct subfacet *subfacet; /* Subfacet, if any, that the op refers to. */
3300 void *garbage; /* Pointer to pass to free(), NULL if none. */
3301 uint64_t stub[1024 / 8]; /* Temporary buffer. */
3302};
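/* Editor's sketch (hypothetical helpers, not an OVS API): the batching
 * pattern the comment above describes.  Work items are queued into a
 * fixed-size array and flushed to the datapath in one call, the same shape
 * as the SUBFACET_DESTROY_MAX_BATCH loop in flush() earlier in this file.
 * 'batch_op', 'batch_submit', and 'batch_add' are illustrative stand-ins. */
#include <stddef.h>

struct batch_op {
    int whatever;                       /* Per-flow work description. */
};

static void
batch_submit(struct batch_op *ops, size_t n_ops)
{
    (void) ops;                         /* A real implementation would     */
    (void) n_ops;                       /* hand the batch to the datapath. */
}

static void
batch_add(struct batch_op *ops, size_t *n_ops, size_t max_ops,
          struct batch_op op)
{
    ops[(*n_ops)++] = op;
    if (*n_ops >= max_ops) {
        batch_submit(ops, *n_ops);      /* One call, many flows. */
        *n_ops = 0;
    }
}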
3303
3304/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
3305 * OpenFlow controller as necessary according to their individual
29ebe880 3306 * configurations. */
62cd7072 3307static void
a39edbd4 3308send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
29ebe880 3309 const struct flow *flow)
3310{
3311 struct ofputil_packet_in pin;
3312
3313 pin.packet = packet->data;
3314 pin.packet_len = packet->size;
62cd7072 3315 pin.reason = OFPR_NO_MATCH;
a7349929 3316 pin.controller_id = 0;
3317
3318 pin.table_id = 0;
3319 pin.cookie = 0;
3320
62cd7072 3321 pin.send_len = 0; /* not used for flow table misses */
3322
3323 flow_get_metadata(flow, &pin.fmd);
3324
d8653c38 3325 connmgr_send_packet_in(ofproto->up.connmgr, &pin);
3326}
3327
6a7e895f 3328static enum slow_path_reason
abe529af 3329process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
ffaef958 3330 const struct ofport_dpif *ofport, const struct ofpbuf *packet)
abe529af 3331{
b6e001b6 3332 if (!ofport) {
6a7e895f 3333 return 0;
ffaef958 3334 } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
b6e001b6 3335 if (packet) {
3336 cfm_process_heartbeat(ofport->cfm, packet);
3337 }
6a7e895f 3338 return SLOW_CFM;
3339 } else if (ofport->bundle && ofport->bundle->lacp
3340 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3341 if (packet) {
3342 lacp_process_packet(ofport->bundle->lacp, ofport, packet);
abe529af 3343 }
6a7e895f 3344 return SLOW_LACP;
3345 } else if (ofproto->stp && stp_should_process_flow(flow)) {
3346 if (packet) {
3347 stp_process_packet(ofport, packet);
3348 }
6a7e895f 3349 return SLOW_STP;
3350 } else {
3351 return 0;
abe529af 3352 }
3353}
3354
501f8d1f 3355static struct flow_miss *
3356flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
3357 const struct flow *flow, uint32_t hash)
abe529af 3358{
501f8d1f 3359 struct flow_miss *miss;
abe529af 3360
501f8d1f 3361 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
ddbc5954 3362 if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
3363 return miss;
3364 }
3365 }
abe529af 3366
b23cdad9 3367 return NULL;
501f8d1f 3368}
abe529af 3369
3370/* Partially initializes 'op' as an "execute" operation for 'miss' and
3371 * 'packet'. The caller must initialize op->actions and op->actions_len. If
3372 * 'miss' is associated with a subfacet, the caller must also initialize
3373 * op->subfacet, and if anything needs to be freed after processing the op,
3374 * the caller must initialize op->garbage also. */
501f8d1f 3375static void
3376init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
3377 struct flow_miss_op *op)
501f8d1f 3378{
3379 if (miss->flow.vlan_tci != miss->initial_tci) {
3380 /* This packet was received on a VLAN splinter port. We
3381 * added a VLAN to the packet to make the packet resemble
3382 * the flow, but the actions were composed assuming that
3383 * the packet contained no VLAN. So, we must remove the
3384 * VLAN header from the packet before trying to execute the
3385 * actions. */
3386 eth_pop_vlan(packet);
3387 }
3388
3389 op->subfacet = NULL;
3390 op->garbage = NULL;
3391 op->dpif_op.type = DPIF_OP_EXECUTE;
3392 op->dpif_op.u.execute.key = miss->key;
3393 op->dpif_op.u.execute.key_len = miss->key_len;
3394 op->dpif_op.u.execute.packet = packet;
3395}
3396
3397/* Helper for handle_flow_miss_without_facet() and
3398 * handle_flow_miss_with_facet(). */
3399static void
3400handle_flow_miss_common(struct rule_dpif *rule,
3401 struct ofpbuf *packet, const struct flow *flow)
3402{
3403 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3404
3405 ofproto->n_matches++;
3406
3407 if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
3408 /*
3409 * Extra-special case for fail-open mode.
3410 *
3411 * We are in fail-open mode and the packet matched the fail-open
3412 * rule, but we are connected to a controller too. We should send
3413 * the packet up to the controller in the hope that it will try to
3414 * set up a flow and thereby allow us to exit fail-open.
3415 *
3416 * See the top-level comment in fail-open.c for more information.
3417 */
3418 send_packet_in_miss(ofproto, packet, flow);
3419 }
3420}
3421
3422/* Figures out whether a flow that missed in 'ofproto', whose details are in
3423 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
3424 * installing a datapath flow. The answer is usually "yes" (a return value of
3425 * true). However, for short flows the cost of bookkeeping is much higher than
3426 * the benefits, so when the datapath holds a large number of flows we impose
3427 * some heuristics to decide which flows are likely to be worth tracking. */
3428static bool
3429flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
3430 struct flow_miss *miss, uint32_t hash)
3431{
3432 if (!ofproto->governor) {
3433 size_t n_subfacets;
3434
3435 n_subfacets = hmap_count(&ofproto->subfacets);
3436 if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
3437 return true;
3438 }
3439
3440 ofproto->governor = governor_create(ofproto->up.name);
3441 }
3442
3443 return governor_should_install_flow(ofproto->governor, hash,
3444 list_size(&miss->packets));
3445}
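/* An illustrative, self-contained sketch of the two-stage heuristic above:
 * facets are always created while the datapath holds less than half the
 * eviction threshold, and beyond that a governor decides per-flow.  The
 * "toy_" names are hypothetical stand-ins; toy_governor_should_install()
 * merely approximates governor_should_install_flow() by favoring flows
 * that have already batched several packets. */
#include <stdbool.h>
#include <stddef.h>

static bool
toy_governor_should_install(size_t n_packets_in_miss)
{
    /* Assumption: a miss that already carries several packets suggests a
     * heavy hitter worth a datapath flow; a single packet does not. */
    return n_packets_in_miss > 1;
}

static bool
toy_should_make_facet(size_t n_subfacets, size_t eviction_threshold,
                      size_t n_packets_in_miss)
{
    if (n_subfacets * 2 <= eviction_threshold) {
        return true;            /* Plenty of headroom: always track. */
    }
    return toy_governor_should_install(n_packets_in_miss);
}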
3446
3447/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
3448 * or creating any datapath flow. May add an "execute" operation to 'ops' and
3449 * increment '*n_ops'. */
3450static void
3451handle_flow_miss_without_facet(struct flow_miss *miss,
3452 struct rule_dpif *rule,
3453 struct flow_miss_op *ops, size_t *n_ops)
3454{
3455 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
a7752d4a 3456 long long int now = time_msec();
9d6ac44e 3457 struct action_xlate_ctx ctx;
530a1d91 3458 struct ofpbuf *packet;
2b459b83 3459
9d6ac44e
BP
3460 LIST_FOR_EACH (packet, list_node, &miss->packets) {
3461 struct flow_miss_op *op = &ops[*n_ops];
3462 struct dpif_flow_stats stats;
3463 struct ofpbuf odp_actions;
abe529af 3464
9d6ac44e 3465 COVERAGE_INC(facet_suppress);
501f8d1f 3466
9d6ac44e 3467 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
501f8d1f 3468
a7752d4a 3469 dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
9d6ac44e 3470 rule_credit_stats(rule, &stats);
abe529af 3471
9d6ac44e
BP
3472 action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
3473 rule, 0, packet);
3474 ctx.resubmit_stats = &stats;
f25d0cf3 3475 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
9d6ac44e 3476 &odp_actions);
abe529af 3477
9d6ac44e
BP
3478 if (odp_actions.size) {
3479 struct dpif_execute *execute = &op->dpif_op.u.execute;
3480
3481 init_flow_miss_execute_op(miss, packet, op);
3482 execute->actions = odp_actions.data;
3483 execute->actions_len = odp_actions.size;
3484 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3485
3486 (*n_ops)++;
3487 } else {
3488 ofpbuf_uninit(&odp_actions);
3489 }
abe529af 3490 }
9d6ac44e
BP
3491}
3492
3493/* Handles 'miss', which matches 'facet'. May add any required datapath
459b16a1
BP
3494 * operations to 'ops', incrementing '*n_ops' for each new op.
3495 *
3496 * All of the packets in 'miss' are considered to have arrived at time 'now'.
3497 * This is really important only for new facets: if we just called time_msec()
3498 * here, then the new subfacet or its packets could look (occasionally) as
3499 * though it was used some time after the facet was used. That can make a
3500 * one-packet flow look like it has a nonzero duration, which looks odd in
3501 * e.g. NetFlow statistics. */
9d6ac44e
BP
3502static void
3503handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
459b16a1 3504 long long int now,
9d6ac44e
BP
3505 struct flow_miss_op *ops, size_t *n_ops)
3506{
6a7e895f
BP
3507 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3508 enum subfacet_path want_path;
9d6ac44e
BP
3509 struct subfacet *subfacet;
3510 struct ofpbuf *packet;
abe529af 3511
a088a1ff 3512 subfacet = subfacet_create(facet, miss, now);
b0f7b9b5 3513
530a1d91 3514 LIST_FOR_EACH (packet, list_node, &miss->packets) {
5fe20d5d 3515 struct flow_miss_op *op = &ops[*n_ops];
67d91f78 3516 struct dpif_flow_stats stats;
5fe20d5d 3517 struct ofpbuf odp_actions;
67d91f78 3518
9d6ac44e 3519 handle_flow_miss_common(facet->rule, packet, &miss->flow);
501f8d1f 3520
5fe20d5d 3521 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
6a7e895f 3522 if (!subfacet->actions || subfacet->slow) {
5fe20d5d 3523 subfacet_make_actions(subfacet, packet, &odp_actions);
501f8d1f 3524 }
67d91f78 3525
459b16a1 3526 dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
15baa734 3527 subfacet_update_stats(subfacet, &stats);
67d91f78 3528
9d6ac44e
BP
3529 if (subfacet->actions_len) {
3530 struct dpif_execute *execute = &op->dpif_op.u.execute;
8338659a 3531
9d6ac44e
BP
3532 init_flow_miss_execute_op(miss, packet, op);
3533 op->subfacet = subfacet;
6a7e895f 3534 if (!subfacet->slow) {
9d6ac44e
BP
3535 execute->actions = subfacet->actions;
3536 execute->actions_len = subfacet->actions_len;
3537 ofpbuf_uninit(&odp_actions);
3538 } else {
3539 execute->actions = odp_actions.data;
3540 execute->actions_len = odp_actions.size;
3541 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3542 }
999fba59 3543
9d6ac44e 3544 (*n_ops)++;
5fe20d5d 3545 } else {
9d6ac44e 3546 ofpbuf_uninit(&odp_actions);
5fe20d5d 3547 }
501f8d1f
BP
3548 }
3549
6a7e895f
BP
3550 want_path = subfacet_want_path(subfacet->slow);
3551 if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
501f8d1f 3552 struct flow_miss_op *op = &ops[(*n_ops)++];
c2b565b5 3553 struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
501f8d1f 3554
b0f7b9b5 3555 op->subfacet = subfacet;
5fe20d5d 3556 op->garbage = NULL;
c2b565b5 3557 op->dpif_op.type = DPIF_OP_FLOW_PUT;
501f8d1f
BP
3558 put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
3559 put->key = miss->key;
3560 put->key_len = miss->key_len;
6a7e895f
BP
3561 if (want_path == SF_FAST_PATH) {
3562 put->actions = subfacet->actions;
3563 put->actions_len = subfacet->actions_len;
3564 } else {
3565 compose_slow_path(ofproto, &facet->flow, subfacet->slow,
3566 op->stub, sizeof op->stub,
3567 &put->actions, &put->actions_len);
3568 }
501f8d1f
BP
3569 put->stats = NULL;
3570 }
3571}
3572
acf60855
JP
3573/* Handles flow miss 'miss'. May add any required datapath operations
3574 * to 'ops', incrementing '*n_ops' for each new op. */
9d6ac44e 3575static void
acf60855
JP
3576handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
3577 size_t *n_ops)
9d6ac44e 3578{
acf60855 3579 struct ofproto_dpif *ofproto = miss->ofproto;
9d6ac44e 3580 struct facet *facet;
459b16a1 3581 long long int now;
9d6ac44e
BP
3582 uint32_t hash;
3583
3584 /* The caller must ensure that miss->hmap_node.hash contains
3585 * flow_hash(miss->flow, 0). */
3586 hash = miss->hmap_node.hash;
3587
3588 facet = facet_lookup_valid(ofproto, &miss->flow, hash);
3589 if (!facet) {
c57b2226
BP
3590 struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);
3591
3592 if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
9d6ac44e
BP
3593 handle_flow_miss_without_facet(miss, rule, ops, n_ops);
3594 return;
3595 }
3596
3597 facet = facet_create(rule, &miss->flow, hash);
459b16a1
BP
3598 now = facet->used;
3599 } else {
3600 now = time_msec();
9d6ac44e 3601 }
459b16a1 3602 handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
9d6ac44e
BP
3603}
3604
8f73d537
EJ
3605static struct drop_key *
3606drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
3607 size_t key_len)
3608{
3609 struct drop_key *drop_key;
3610
3611 HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
3612 &backer->drop_keys) {
3613 if (drop_key->key_len == key_len
3614 && !memcmp(drop_key->key, key, key_len)) {
3615 return drop_key;
3616 }
3617 }
3618 return NULL;
3619}
3620
3621static void
3622drop_key_clear(struct dpif_backer *backer)
3623{
3624 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3625 struct drop_key *drop_key, *next;
3626
3627 HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
3628 int error;
3629
3630 error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
3631 NULL);
3632 if (error && !VLOG_DROP_WARN(&rl)) {
3633 struct ds ds = DS_EMPTY_INITIALIZER;
3634 odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
3635 VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
3636 ds_cstr(&ds));
3637 ds_destroy(&ds);
3638 }
3639
3640 hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
3641 free(drop_key->key);
3642 free(drop_key);
3643 }
3644}
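/* An illustrative, self-contained sketch of the drop-key pattern above:
 * opaque byte strings are indexed by a hash of their contents, and a
 * candidate matches only if both the length and the bytes agree, just as
 * drop_key_lookup() checks key_len and memcmp().  toy_hash_bytes() is a
 * stand-in for hash_bytes(); FNV-1a is used purely for illustration. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t
toy_hash_bytes(const void *p_, size_t n)
{
    const uint8_t *p = p_;
    uint32_t hash = 2166136261u;            /* FNV-1a offset basis. */

    while (n--) {
        hash = (hash ^ *p++) * 16777619u;   /* FNV-1a prime. */
    }
    return hash;
}

struct toy_drop_key {
    const uint8_t *key;
    size_t key_len;
    uint32_t hash;                  /* toy_hash_bytes(key, key_len). */
};

/* Returns the entry in keys[0..n-1] matching 'key', or NULL. */
static const struct toy_drop_key *
toy_drop_key_lookup(const struct toy_drop_key *keys, size_t n,
                    const void *key, size_t key_len)
{
    uint32_t hash = toy_hash_bytes(key, key_len);
    size_t i;

    for (i = 0; i < n; i++) {
        if (keys[i].hash == hash
            && keys[i].key_len == key_len
            && !memcmp(keys[i].key, key, key_len)) {
            return &keys[i];
        }
    }
    return NULL;
}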
3645
e09ee259
EJ
3646/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
3647 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
3648 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
3649 * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
3650 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
3651 * 'packet' ingressed.
e2a6ca36 3652 *
e09ee259
EJ
3653 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
3654 * 'flow''s in_port to OFPP_NONE.
3655 *
3656 * This function does post-processing on data returned from
3657 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
3658 * of the upcall processing logic. In particular, if the extracted in_port is
3659 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
3660 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
3661 * a VLAN header onto 'packet' (if it is nonnull).
3662 *
3663 * Optionally, if nonnull, sets '*initial_tci' to the VLAN TCI with which the
3664 * packet was really received, that is, the actual VLAN TCI extracted by
3665 * odp_flow_key_to_flow(). (This differs from the value returned in
3666 * flow->vlan_tci only for packets received on VLAN splinters.)
3667 *
b9ad7294
EJ
3668 * Similarly, this function also includes some logic to help with tunnels. It
3669 * may modify 'flow' as necessary to make the tunneling implementation
3670 * transparent to the upcall processing logic.
3671 *
e09ee259
EJ
3672 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
3673 * or some other positive errno if there are other problems. */
3674static int
3675ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
3676 const struct nlattr *key, size_t key_len,
3677 struct flow *flow, enum odp_key_fitness *fitnessp,
3678 struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
3679 ovs_be16 *initial_tci)
e84173dc 3680{
e09ee259
EJ
3681 const struct ofport_dpif *port;
3682 enum odp_key_fitness fitness;
b9ad7294 3683 int error = ENODEV;
e09ee259
EJ
3684
3685 fitness = odp_flow_key_to_flow(key, key_len, flow);
e84173dc 3686 if (fitness == ODP_FIT_ERROR) {
e09ee259
EJ
3687 error = EINVAL;
3688 goto exit;
3689 }
3690
3691 if (initial_tci) {
3692 *initial_tci = flow->vlan_tci;
e84173dc 3693 }
e84173dc 3694
e09ee259
EJ
3695 if (odp_in_port) {
3696 *odp_in_port = flow->in_port;
3697 }
3698
b9ad7294
EJ
3699 if (tnl_port_should_receive(flow)) {
3700 const struct ofport *ofport = tnl_port_receive(flow);
3701 if (!ofport) {
3702 flow->in_port = OFPP_NONE;
3703 goto exit;
3704 }
3705 port = ofport_dpif_cast(ofport);
e09ee259 3706
b9ad7294
EJ
3707 /* We can't reproduce 'key' from 'flow'. */
3708 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
e09ee259 3709
b9ad7294
EJ
3710 /* XXX: Since the tunnel module is not scoped per backer, it's
3711 * theoretically possible that we'll receive an ofport belonging to an
3712 * entirely different datapath. In practice, this can't happen because
3713 * no platform has two separate datapaths which each support
3714 * tunneling. */
3715 ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
3716 } else {
3717 port = odp_port_to_ofport(backer, flow->in_port);
3718 if (!port) {
3719 flow->in_port = OFPP_NONE;
3720 goto exit;
3721 }
3722
3723 flow->in_port = port->up.ofp_port;
3724 if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
3725 if (packet) {
3726 /* Make the packet resemble the flow, so that it gets sent to
3727 * an OpenFlow controller properly, so that it looks correct
3728 * for sFlow, and so that flow_extract() will get the correct
3729 * vlan_tci if it is called on 'packet'.
3730 *
3731 * The allocated space inside 'packet' probably also contains
3732 * 'key', that is, both 'packet' and 'key' are probably part of
3733 * a struct dpif_upcall (see the large comment on that
3734 * structure definition), so pushing data on 'packet' is in
3735 * general not a good idea since it could overwrite 'key' or
3736 * free it as a side effect. However, it's OK in this special
3737 * case because we know that 'packet' is inside a Netlink
3738 * attribute: pushing 4 bytes will just overwrite the 4-byte
3739 * "struct nlattr", which is fine since we don't need that
3740 * header anymore. */
3741 eth_push_vlan(packet, flow->vlan_tci);
3742 }
3743 /* We can't reproduce 'key' from 'flow'. */
3744 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
52a90c29
BP
3745 }
3746 }
e09ee259 3747 error = 0;
52a90c29 3748
b9ad7294
EJ
3749 if (ofproto) {
3750 *ofproto = ofproto_dpif_cast(port->up.ofproto);
3751 }
3752
e09ee259
EJ
3753exit:
3754 if (fitnessp) {
3755 *fitnessp = fitness;
3756 }
3757 return error;
e84173dc
BP
3758}
3759
501f8d1f 3760static void
acf60855 3761handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
501f8d1f
BP
3762 size_t n_upcalls)
3763{
3764 struct dpif_upcall *upcall;
b23cdad9
BP
3765 struct flow_miss *miss;
3766 struct flow_miss misses[FLOW_MISS_MAX_BATCH];
501f8d1f 3767 struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
c2b565b5 3768 struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
501f8d1f 3769 struct hmap todo;
b23cdad9 3770 int n_misses;
501f8d1f
BP
3771 size_t n_ops;
3772 size_t i;
3773
3774 if (!n_upcalls) {
3775 return;
3776 }
3777
3778 /* Construct the to-do list.
3779 *
3780 * This just amounts to extracting the flow from each packet and sticking
3781 * the packets that have the same flow in the same "flow_miss" structure so
3782 * that we can process them together. */
3783 hmap_init(&todo);
b23cdad9 3784 n_misses = 0;
501f8d1f 3785 for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
b23cdad9
BP
3786 struct flow_miss *miss = &misses[n_misses];
3787 struct flow_miss *existing_miss;
acf60855 3788 struct ofproto_dpif *ofproto;
a088a1ff 3789 uint32_t odp_in_port;
1d446463 3790 struct flow flow;
b23cdad9 3791 uint32_t hash;
e09ee259 3792 int error;
501f8d1f 3793
e09ee259
EJ
3794 error = ofproto_receive(backer, upcall->packet, upcall->key,
3795 upcall->key_len, &flow, &miss->key_fitness,
3796 &ofproto, &odp_in_port, &miss->initial_tci);
3797 if (error == ENODEV) {
8f73d537
EJ
3798 struct drop_key *drop_key;
3799
acf60855
JP
3800 /* Received packet on a port for which we couldn't associate
3801 * an ofproto. This can happen if a port is removed while
3802 * traffic is being received. Print a rate-limited message
8f73d537
EJ
3803 * in case it happens frequently. Install a drop flow so
3804 * that future packets of the flow are inexpensively dropped
3805 * in the kernel. */
acf60855
JP
3806 VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
3807 flow.in_port);
8f73d537
EJ
3808
3809 drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
3810 if (!drop_key) {
3811 drop_key = xmalloc(sizeof *drop_key);
3812 drop_key->key = xmemdup(upcall->key, upcall->key_len);
3813 drop_key->key_len = upcall->key_len;
3814
3815 hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
3816 hash_bytes(drop_key->key, drop_key->key_len, 0));
3817 dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
3818 drop_key->key, drop_key->key_len, NULL, 0, NULL);
3819 }
3820 continue;
acf60855 3821 }
e09ee259 3822 if (error) {
b0f7b9b5
BP
3823 continue;
3824 }
72e8bf28 3825 flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
1d446463 3826 &flow.tunnel, flow.in_port, &miss->flow);
501f8d1f 3827
501f8d1f 3828 /* Add other packets to a to-do list. */
b23cdad9 3829 hash = flow_hash(&miss->flow, 0);
ddbc5954 3830 existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
b23cdad9
BP
3831 if (!existing_miss) {
3832 hmap_insert(&todo, &miss->hmap_node, hash);
acf60855 3833 miss->ofproto = ofproto;
b23cdad9
BP
3834 miss->key = upcall->key;
3835 miss->key_len = upcall->key_len;
6a7e895f 3836 miss->upcall_type = upcall->type;
a088a1ff 3837 miss->odp_in_port = odp_in_port;
b23cdad9
BP
3838 list_init(&miss->packets);
3839
3840 n_misses++;
3841 } else {
3842 miss = existing_miss;
3843 }
501f8d1f
BP
3844 list_push_back(&miss->packets, &upcall->packet->list_node);
3845 }
3846
3847 /* Process each element in the to-do list, constructing the set of
3848 * operations to batch. */
3849 n_ops = 0;
33bb0caa 3850 HMAP_FOR_EACH (miss, hmap_node, &todo) {
acf60855 3851 handle_flow_miss(miss, flow_miss_ops, &n_ops);
abe529af 3852 }
cb22974d 3853 ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
501f8d1f
BP
3854
3855 /* Execute batch. */
3856 for (i = 0; i < n_ops; i++) {
3857 dpif_ops[i] = &flow_miss_ops[i].dpif_op;
3858 }
acf60855 3859 dpif_operate(backer->dpif, dpif_ops, n_ops);
501f8d1f
BP
3860
3861 /* Free memory and update facets. */
3862 for (i = 0; i < n_ops; i++) {
3863 struct flow_miss_op *op = &flow_miss_ops[i];
501f8d1f
BP
3864
3865 switch (op->dpif_op.type) {
3866 case DPIF_OP_EXECUTE:
501f8d1f 3867 break;
abe529af 3868
501f8d1f 3869 case DPIF_OP_FLOW_PUT:
c2b565b5 3870 if (!op->dpif_op.error) {
6a7e895f 3871 op->subfacet->path = subfacet_want_path(op->subfacet->slow);
501f8d1f
BP
3872 }
3873 break;
b99d3cee
BP
3874
3875 case DPIF_OP_FLOW_DEL:
3876 NOT_REACHED();
501f8d1f 3877 }
5fe20d5d
BP
3878
3879 free(op->garbage);
501f8d1f 3880 }
33bb0caa 3881 hmap_destroy(&todo);
abe529af
BP
3882}
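/* An illustrative, self-contained sketch of the batching idiom above:
 * operations are staged into a fixed-size array and then submitted with a
 * single call, rather than one round trip per operation.  toy_op and
 * toy_operate() are hypothetical stand-ins for struct dpif_op and
 * dpif_operate(). */
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_BATCH 50

struct toy_op {
    int type;                       /* E.g. "execute" vs. "flow put". */
    int error;                      /* Filled in by toy_operate(). */
};

static void
toy_operate(struct toy_op **ops, size_t n_ops)
{
    size_t i;

    /* A real backend would issue all 'n_ops' in one request; here we just
     * mark each one successful. */
    for (i = 0; i < n_ops; i++) {
        ops[i]->error = 0;
    }
    printf("submitted %zu operations in one batch\n", n_ops);
}

static void
toy_handle_batch(const int *op_types, size_t n)
{
    struct toy_op ops[TOY_MAX_BATCH];
    struct toy_op *op_ptrs[TOY_MAX_BATCH];
    size_t n_ops = 0;
    size_t i;

    for (i = 0; i < n && n_ops < TOY_MAX_BATCH; i++) {
        ops[n_ops].type = op_types[i];      /* Stage the operation. */
        op_ptrs[n_ops] = &ops[n_ops];
        n_ops++;
    }
    toy_operate(op_ptrs, n_ops);            /* One batched submission. */
}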
3883
6a7e895f
BP
3884static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
3885classify_upcall(const struct dpif_upcall *upcall)
3886{
3887 union user_action_cookie cookie;
3888
3889 /* First look at the upcall type. */
3890 switch (upcall->type) {
3891 case DPIF_UC_ACTION:
3892 break;
3893
3894 case DPIF_UC_MISS:
3895 return MISS_UPCALL;
3896
3897 case DPIF_N_UC_TYPES:
3898 default:
3899 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
3900 return BAD_UPCALL;
3901 }
3902
3903 /* "action" upcalls need a closer look. */
e995e3df
BP
3904 if (!upcall->userdata) {
3905 VLOG_WARN_RL(&rl, "action upcall missing cookie");
3906 return BAD_UPCALL;
3907 }
3908 if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
3909 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
3910 nl_attr_get_size(upcall->userdata));
3911 return BAD_UPCALL;
3912 }
3913 memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
6a7e895f
BP
3914 switch (cookie.type) {
3915 case USER_ACTION_COOKIE_SFLOW:
3916 return SFLOW_UPCALL;
3917
3918 case USER_ACTION_COOKIE_SLOW_PATH:
3919 return MISS_UPCALL;
3920
3921 case USER_ACTION_COOKIE_UNSPEC:
3922 default:
e995e3df
BP
3923 VLOG_WARN_RL(&rl, "invalid user cookie: 0x%"PRIx64,
3924 nl_attr_get_u64(upcall->userdata));
6a7e895f
BP
3925 return BAD_UPCALL;
3926 }
3927}
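/* An illustrative, self-contained sketch of the cookie validation above:
 * a fixed-size tagged union is copied out of an opaque attribute buffer
 * only after its length checks out, and unknown tags are rejected.  The
 * "toy_" types are hypothetical stand-ins for union user_action_cookie
 * and the classification enum. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

enum toy_cookie_type { TOY_COOKIE_SFLOW = 1, TOY_COOKIE_SLOW_PATH = 2 };
enum toy_upcall_class { TOY_SFLOW_UPCALL, TOY_MISS_UPCALL, TOY_BAD_UPCALL };

union toy_cookie {
    uint32_t type;              /* One of enum toy_cookie_type. */
    uint64_t raw;               /* Forces an 8-byte cookie. */
};

static enum toy_upcall_class
toy_classify_action_upcall(const void *userdata, size_t len)
{
    union toy_cookie cookie;

    if (!userdata || len != sizeof cookie) {
        return TOY_BAD_UPCALL;  /* Missing or mis-sized cookie. */
    }
    memcpy(&cookie, userdata, sizeof cookie);

    switch (cookie.type) {
    case TOY_COOKIE_SFLOW:
        return TOY_SFLOW_UPCALL;
    case TOY_COOKIE_SLOW_PATH:
        return TOY_MISS_UPCALL;
    default:
        return TOY_BAD_UPCALL;
    }
}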
3928
abe529af 3929static void
acf60855 3930handle_sflow_upcall(struct dpif_backer *backer,
6a7e895f 3931 const struct dpif_upcall *upcall)
abe529af 3932{
acf60855 3933 struct ofproto_dpif *ofproto;
1673e0e4 3934 union user_action_cookie cookie;
e84173dc 3935 struct flow flow;
e1b1d06a 3936 uint32_t odp_in_port;
abe529af 3937
e09ee259
EJ
3938 if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
3939 &flow, NULL, &ofproto, &odp_in_port, NULL)
3940 || !ofproto->sflow) {
e84173dc
BP
3941 return;
3942 }
3943
e995e3df 3944 memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
e1b1d06a
JP
3945 dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
3946 odp_in_port, &cookie);
6ff686f2
PS
3947}
3948
9b16c439 3949static int
acf60855 3950handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
6ff686f2 3951{
9b16c439 3952 struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
90a7c55e
BP
3953 struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
3954 uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
3955 int n_processed;
9b16c439
BP
3956 int n_misses;
3957 int i;
abe529af 3958
cb22974d 3959 ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);
abe529af 3960
9b16c439 3961 n_misses = 0;
90a7c55e 3962 for (n_processed = 0; n_processed < max_batch; n_processed++) {
9b16c439 3963 struct dpif_upcall *upcall = &misses[n_misses];
90a7c55e 3964 struct ofpbuf *buf = &miss_bufs[n_misses];
9b16c439
BP
3965 int error;
3966
90a7c55e
BP
3967 ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
3968 sizeof miss_buf_stubs[n_misses]);
acf60855 3969 error = dpif_recv(backer->dpif, upcall, buf);
9b16c439 3970 if (error) {
90a7c55e 3971 ofpbuf_uninit(buf);
9b16c439
BP
3972 break;
3973 }
3974
6a7e895f
BP
3975 switch (classify_upcall(upcall)) {
3976 case MISS_UPCALL:
9b16c439
BP
3977 /* Handle it later. */
3978 n_misses++;
3979 break;
3980
6a7e895f 3981 case SFLOW_UPCALL:
acf60855 3982 handle_sflow_upcall(backer, upcall);
6a7e895f
BP
3983 ofpbuf_uninit(buf);
3984 break;
3985
3986 case BAD_UPCALL:
3987 ofpbuf_uninit(buf);
9b16c439
BP
3988 break;
3989 }
abe529af 3990 }
9b16c439 3991
6a7e895f 3992 /* Handle deferred MISS_UPCALL processing. */
acf60855 3993 handle_miss_upcalls(backer, misses, n_misses);
90a7c55e
BP
3994 for (i = 0; i < n_misses; i++) {
3995 ofpbuf_uninit(&miss_bufs[i]);
3996 }
9b16c439 3997
90a7c55e 3998 return n_processed;
abe529af
BP
3999}
4000\f
4001/* Flow expiration. */
4002
b0f7b9b5 4003static int subfacet_max_idle(const struct ofproto_dpif *);
acf60855 4004static void update_stats(struct dpif_backer *);
abe529af 4005static void rule_expire(struct rule_dpif *);
b0f7b9b5 4006static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
abe529af
BP
4007
4008/* This function is called periodically by run(). Its job is to collect
4009 * updates for the flows that have been installed into the datapath, most
4010 * importantly when they were last used, and then use that information to
4011 * expire flows that have not been used recently.
4012 *
4013 * Returns the number of milliseconds after which it should be called again. */
4014static int
acf60855 4015expire(struct dpif_backer *backer)
abe529af 4016{
acf60855
JP
4017 struct ofproto_dpif *ofproto;
4018 int max_idle = INT32_MAX;
abe529af 4019
8f73d537
EJ
4020 /* Periodically clear out the drop keys in an effort to keep them
4021 * relatively few. */
4022 drop_key_clear(backer);
4023
acf60855
JP
4024 /* Update stats for each flow in the backer. */
4025 update_stats(backer);
abe529af 4026
acf60855 4027 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
e503cc19 4028 struct rule *rule, *next_rule;
acf60855 4029 int dp_max_idle;
abe529af 4030
acf60855
JP
4031 if (ofproto->backer != backer) {
4032 continue;
4033 }
0697b5c3 4034
acf60855
JP
4035 /* Expire subfacets that have been idle too long. */
4036 dp_max_idle = subfacet_max_idle(ofproto);
4037 expire_subfacets(ofproto, dp_max_idle);
4038
4039 max_idle = MIN(max_idle, dp_max_idle);
4040
4041 /* Expire OpenFlow flows whose idle_timeout or hard_timeout
4042 * has passed. */
e503cc19
SH
4043 LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
4044 &ofproto->up.expirable) {
4045 rule_expire(rule_dpif_cast(rule));
0697b5c3 4046 }
abe529af 4047
acf60855
JP
4048 /* All outstanding data in existing flows has been accounted, so it's a
4049 * good time to do bond rebalancing. */
4050 if (ofproto->has_bonded_bundles) {
4051 struct ofbundle *bundle;
abe529af 4052
acf60855
JP
4053 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
4054 if (bundle->bond) {
2cc3c58e 4055 bond_rebalance(bundle->bond, &backer->revalidate_set);
acf60855 4056 }
abe529af
BP
4057 }
4058 }
4059 }
4060
acf60855 4061 return MIN(max_idle, 1000);
abe529af
BP
4062}
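/* An illustrative, self-contained sketch of how a periodic maintenance
 * function that returns "call me again in N ms" can drive a loop.
 * toy_expire() stands in for expire(); the clamp to 1000 ms mirrors the
 * MIN(max_idle, 1000) above so that other bookkeeping still runs at least
 * once per second even when flows could idle much longer. */
#include <unistd.h>

static int
toy_expire(void)
{
    int max_idle = 5000;        /* Pretend flows may idle up to 5 s. */
    return max_idle < 1000 ? max_idle : 1000;
}

static void
toy_maintenance_loop(int iterations)
{
    while (iterations-- > 0) {
        int delay_ms = toy_expire();
        usleep(delay_ms * 1000);    /* Sleep until the next run is due. */
    }
}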
4063
a218c879
BP
4064/* Updates flow table statistics given that the datapath just reported 'stats'
4065 * as 'subfacet''s statistics. */
4066static void
4067update_subfacet_stats(struct subfacet *subfacet,
4068 const struct dpif_flow_stats *stats)
4069{
4070 struct facet *facet = subfacet->facet;
4071
4072 if (stats->n_packets >= subfacet->dp_packet_count) {
4073 uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
4074 facet->packet_count += extra;
4075 } else {
4076 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
4077 }
4078
4079 if (stats->n_bytes >= subfacet->dp_byte_count) {
4080 facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
4081 } else {
4082 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
4083 }
4084
4085 subfacet->dp_packet_count = stats->n_packets;
4086 subfacet->dp_byte_count = stats->n_bytes;
4087
4088 facet->tcp_flags |= stats->tcp_flags;
4089
4090 subfacet_update_time(subfacet, stats->used);
4091 if (facet->accounted_bytes < facet->byte_count) {
4092 facet_learn(facet);
4093 facet_account(facet);
4094 facet->accounted_bytes = facet->byte_count;
4095 }
4096 facet_push_stats(facet);
4097}
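/* An illustrative, self-contained sketch of the counter-folding rule
 * above: the datapath reports absolute totals, so only the positive delta
 * since the previous report is accumulated, and a counter that appears to
 * run backwards is logged as an anomaly instead of underflowing. */
#include <stdint.h>
#include <stdio.h>

struct toy_counter {
    uint64_t dp_total;          /* Last absolute total reported. */
    uint64_t accumulated;       /* Deltas folded into the owner so far. */
};

static void
toy_fold_counter(struct toy_counter *c, uint64_t reported_total)
{
    if (reported_total >= c->dp_total) {
        c->accumulated += reported_total - c->dp_total;
    } else {
        fprintf(stderr, "unexpected counter decrease\n");
    }
    c->dp_total = reported_total;   /* Remember the new baseline. */
}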
4098
4099/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
4100 * about, or a flow that shouldn't be installed but was anyway. Delete it. */
4101static void
acf60855 4102delete_unexpected_flow(struct ofproto_dpif *ofproto,
a218c879
BP
4103 const struct nlattr *key, size_t key_len)
4104{
4105 if (!VLOG_DROP_WARN(&rl)) {
4106 struct ds s;
4107
4108 ds_init(&s);
4109 odp_flow_key_format(key, key_len, &s);
acf60855 4110 VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
a218c879
BP
4111 ds_destroy(&s);
4112 }
4113
4114 COVERAGE_INC(facet_unexpected);
acf60855 4115 dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
a218c879
BP
4116}
4117
abe529af
BP
4118/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
4119 *
4120 * This function also pushes statistics updates to rules which each facet
4121 * resubmits into. Generally these statistics will be accurate. However, if a
4122 * facet changes the rule it resubmits into at some time in between
4123 * update_stats() runs, it is possible that statistics accrued to the
4124 * old rule will be incorrectly attributed to the new rule. This could be
4125 * avoided by calling update_stats() whenever rules are created or
4126 * deleted. However, the performance impact of making so many calls to the
4127 * datapath does not justify the benefit of having perfectly accurate statistics.
4128 */
4129static void
acf60855 4130update_stats(struct dpif_backer *backer)
abe529af
BP
4131{
4132 const struct dpif_flow_stats *stats;
4133 struct dpif_flow_dump dump;
4134 const struct nlattr *key;
4135 size_t key_len;
4136
acf60855 4137 dpif_flow_dump_start(&dump, backer->dpif);
abe529af 4138 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
acf60855 4139 struct flow flow;
b0f7b9b5 4140 struct subfacet *subfacet;
acf60855 4141 struct ofproto_dpif *ofproto;
b9ad7294 4142 struct ofport_dpif *ofport;
acf60855 4143 uint32_t key_hash;
abe529af 4144
58c6adda
EJ
4145 if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
4146 NULL, NULL)) {
acf60855
JP
4147 continue;
4148 }
4149
b9ad7294
EJ
4150 ofport = get_ofp_port(ofproto, flow.in_port);
4151 if (ofport && ofport->tnl_port) {
4152 netdev_vport_inc_rx(ofport->up.netdev, stats);
4153 }
4154
acf60855 4155 key_hash = odp_flow_key_hash(key, key_len);
acf60855 4156 subfacet = subfacet_find(ofproto, key, key_len, key_hash, &flow);
6a7e895f
BP
4157 switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
4158 case SF_FAST_PATH:
a218c879 4159 update_subfacet_stats(subfacet, stats);
6a7e895f
BP
4160 break;
4161
4162 case SF_SLOW_PATH:
4163 /* Stats are updated per-packet. */
4164 break;
4165
4166 case SF_NOT_INSTALLED:
4167 default:
acf60855 4168 delete_unexpected_flow(ofproto, key, key_len);
6a7e895f 4169 break;
abe529af
BP
4170 }
4171 }
4172 dpif_flow_dump_done(&dump);
4173}
4174
4175/* Calculates and returns the number of milliseconds of idle time after which
b0f7b9b5
BP
4176 * subfacets should expire from the datapath. When a subfacet expires, we fold
4177 * its statistics into its facet, and when a facet's last subfacet expires, we
4178 * fold its statistics into its rule. */
abe529af 4179static int
b0f7b9b5 4180subfacet_max_idle(const struct ofproto_dpif *ofproto)
abe529af
BP
4181{
4182 /*
4183 * Idle time histogram.
4184 *
b0f7b9b5
BP
4185 * Most of the time a switch has a relatively small number of subfacets.
4186 * When this is the case we might as well keep statistics for all of them
4187 * in userspace and cache them in the kernel datapath for performance as
abe529af
BP
4188 * well.
4189 *
b0f7b9b5 4190 * As the number of subfacets increases, the memory required to maintain
abe529af 4191 * statistics about them in userspace and in the kernel becomes
b0f7b9b5
BP
4192 * significant. However, with a large number of subfacets it is likely
4193 * that only a few of them are "heavy hitters" that consume a large amount
4194 * of bandwidth. At this point, only heavy hitters are worth caching in
4195 * the kernel and maintaining in userspace; other subfacets we can
4196 * discard.
abe529af
BP
4197 *
4198 * The technique used to compute the idle time is to build a histogram with
b0f7b9b5 4199 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
abe529af
BP
4200 * that is installed in the kernel gets dropped in the appropriate bucket.
4201 * After the histogram has been built, we compute the cutoff so that only
b0f7b9b5 4202 * the most-recently-used 1% of subfacets (but at least
084f5290 4203 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
b0f7b9b5
BP
4204 * the most-recently-used bucket of subfacets is kept, so actually an
4205 * arbitrary number of subfacets can be kept in any given expiration run
084f5290
SH
4206 * (though the next run will delete most of those unless they receive
4207 * additional data).
abe529af 4208 *
b0f7b9b5
BP
4209 * This requires a second pass through the subfacets, in addition to the
4210 * pass made by update_stats(), because the former function never looks at
4211 * uninstallable subfacets.
abe529af
BP
4212 */
4213 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
4214 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
4215 int buckets[N_BUCKETS] = { 0 };
f11c1ef4 4216 int total, subtotal, bucket;
b0f7b9b5 4217 struct subfacet *subfacet;
abe529af
BP
4218 long long int now;
4219 int i;
4220
b0f7b9b5 4221 total = hmap_count(&ofproto->subfacets);
084f5290 4222 if (total <= ofproto->up.flow_eviction_threshold) {
abe529af
BP
4223 return N_BUCKETS * BUCKET_WIDTH;
4224 }
4225
4226 /* Build histogram. */
4227 now = time_msec();
b0f7b9b5
BP
4228 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
4229 long long int idle = now - subfacet->used;
abe529af
BP
4230 int bucket = (idle <= 0 ? 0
4231 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
4232 : (unsigned int) idle / BUCKET_WIDTH);
4233 buckets[bucket]++;
4234 }
4235
4236 /* Find the first bucket whose flows should be expired. */
f11c1ef4
SH
4237 subtotal = bucket = 0;
4238 do {
4239 subtotal += buckets[bucket++];
084f5290
SH
4240 } while (bucket < N_BUCKETS &&
4241 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
abe529af
BP
4242
4243 if (VLOG_IS_DBG_ENABLED()) {
4244 struct ds s;
4245
4246 ds_init(&s);
4247 ds_put_cstr(&s, "keep");
4248 for (i = 0; i < N_BUCKETS; i++) {
4249 if (i == bucket) {
4250 ds_put_cstr(&s, ", drop");
4251 }
4252 if (buckets[i]) {
4253 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
4254 }
4255 }
4256 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
4257 ds_destroy(&s);
4258 }
4259
4260 return bucket * BUCKET_WIDTH;
4261}
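/* An illustrative, self-contained sketch of the histogram cutoff above:
 * idle times are bucketed, then buckets are consumed from most recently
 * used until at least max(threshold, total/100) flows are retained;
 * anything idle longer than the resulting cutoff expires.  The "toy_"
 * names and constants are stand-ins for the real ones. */
#include <stddef.h>

#define TOY_BUCKET_WIDTH 100    /* Milliseconds covered by each bucket. */
#define TOY_N_BUCKETS 50        /* Histogram spans 0..5000 ms of idleness. */

static int
toy_max_idle(const long long *idle_ms, size_t n, size_t threshold)
{
    int buckets[TOY_N_BUCKETS] = { 0 };
    size_t keep = n / 100 > threshold ? n / 100 : threshold;
    size_t subtotal = 0;
    int bucket = 0;
    size_t i;

    if (n <= threshold) {
        return TOY_N_BUCKETS * TOY_BUCKET_WIDTH;    /* Keep everything. */
    }

    /* Build the histogram. */
    for (i = 0; i < n; i++) {
        long long idle = idle_ms[i];
        int b = (idle <= 0 ? 0
                 : idle >= (long long) TOY_BUCKET_WIDTH * TOY_N_BUCKETS
                 ? TOY_N_BUCKETS - 1
                 : (int) (idle / TOY_BUCKET_WIDTH));
        buckets[b]++;
    }

    /* Find the first bucket whose flows should expire. */
    do {
        subtotal += buckets[bucket++];
    } while (bucket < TOY_N_BUCKETS && subtotal < keep);

    return bucket * TOY_BUCKET_WIDTH;
}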
4262
abe529af 4263static void
b0f7b9b5 4264expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
abe529af 4265{
625b0720
BP
4266 /* Cutoff time for most flows. */
4267 long long int normal_cutoff = time_msec() - dp_max_idle;
4268
4269 /* We really want to keep flows for special protocols around, so use a more
4270 * conservative cutoff. */
4271 long long int special_cutoff = time_msec() - 10000;
b99d3cee 4272
b0f7b9b5 4273 struct subfacet *subfacet, *next_subfacet;
1d85f9e5 4274 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
b99d3cee 4275 int n_batch;
abe529af 4276
b99d3cee 4277 n_batch = 0;
b0f7b9b5
BP
4278 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
4279 &ofproto->subfacets) {
625b0720
BP
4280 long long int cutoff;
4281
4282 cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
4283 ? special_cutoff
4284 : normal_cutoff);
b0f7b9b5 4285 if (subfacet->used < cutoff) {
6a7e895f 4286 if (subfacet->path != SF_NOT_INSTALLED) {
b99d3cee 4287 batch[n_batch++] = subfacet;
1d85f9e5
JP
4288 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
4289 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee
BP
4290 n_batch = 0;
4291 }
4292 } else {
4293 subfacet_destroy(subfacet);
4294 }
abe529af
BP
4295 }
4296 }
b99d3cee
BP
4297
4298 if (n_batch > 0) {
1d85f9e5 4299 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee 4300 }
abe529af
BP
4301}
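/* An illustrative, self-contained sketch of the dual-cutoff policy above:
 * subfacets whose slow-path reason marks a special protocol (CFM, LACP,
 * STP) get a fixed, more conservative 10-second idle allowance, while
 * ordinary subfacets expire after the dynamically computed idle time.
 * The "toy_" names are hypothetical stand-ins. */
#include <stdbool.h>

static bool
toy_should_expire(long long now_ms, long long last_used_ms,
                  bool is_special_protocol, int dp_max_idle_ms)
{
    long long cutoff = is_special_protocol
                       ? now_ms - 10000           /* Conservative cutoff. */
                       : now_ms - dp_max_idle_ms; /* Histogram-based cutoff. */

    return last_used_ms < cutoff;
}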
4302
4303/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
4304 * then delete it entirely. */
4305static void
4306rule_expire(struct rule_dpif *rule)
4307{
abe529af
BP
4308 struct facet *facet, *next_facet;
4309 long long int now;
4310 uint8_t reason;
4311
e2a3d183
BP
4312 if (rule->up.pending) {
4313 /* We'll have to expire it later. */
4314 return;
4315 }
4316
abe529af
BP
4317 /* Has 'rule' expired? */
4318 now = time_msec();
4319 if (rule->up.hard_timeout
308881af 4320 && now > rule->up.modified + rule->up.hard_timeout * 1000) {
abe529af 4321 reason = OFPRR_HARD_TIMEOUT;
8ea6ac3e 4322 } else if (rule->up.idle_timeout
1745cd08 4323 && now > rule->up.used + rule->up.idle_timeout * 1000) {
abe529af
BP
4324 reason = OFPRR_IDLE_TIMEOUT;
4325 } else {
4326 return;
4327 }
4328
4329 COVERAGE_INC(ofproto_dpif_expired);
4330
4331 /* Update stats. (This is a no-op if the rule expired due to an idle
4332 * timeout, because that only happens when the rule has no facets left.) */
4333 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 4334 facet_remove(facet);
abe529af
BP
4335 }
4336
4337 /* Get rid of the rule. */
4338 ofproto_rule_expire(&rule->up, reason);
4339}
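/* An illustrative, self-contained sketch of the OpenFlow timeout check
 * above: a timeout of zero disables that form of expiration, and timeouts
 * are configured in seconds but compared against millisecond clocks. */
enum toy_expiry { TOY_NOT_EXPIRED, TOY_HARD_TIMEOUT, TOY_IDLE_TIMEOUT };

static enum toy_expiry
toy_rule_expired(long long now_ms, long long modified_ms, long long used_ms,
                 unsigned int hard_timeout_s, unsigned int idle_timeout_s)
{
    if (hard_timeout_s && now_ms > modified_ms + hard_timeout_s * 1000LL) {
        return TOY_HARD_TIMEOUT;        /* Absolute lifetime exceeded. */
    } else if (idle_timeout_s
               && now_ms > used_ms + idle_timeout_s * 1000LL) {
        return TOY_IDLE_TIMEOUT;        /* No traffic recently. */
    } else {
        return TOY_NOT_EXPIRED;
    }
}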
4340\f
4341/* Facets. */
4342
f3827897 4343/* Creates and returns a new facet owned by 'rule', given a 'flow'.
abe529af
BP
4344 *
4345 * The caller must already have determined that no facet with an identical
4346 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
f3827897
BP
4347 * the ofproto's classifier table.
4348 *
2b459b83
BP
4349 * 'hash' must be the return value of flow_hash(flow, 0).
4350 *
b0f7b9b5
BP
4351 * The facet will initially have no subfacets. The caller should create (at
4352 * least) one subfacet with subfacet_create(). */
abe529af 4353static struct facet *
2b459b83 4354facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
abe529af
BP
4355{
4356 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4357 struct facet *facet;
4358
4359 facet = xzalloc(sizeof *facet);
4360 facet->used = time_msec();
2b459b83 4361 hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
abe529af
BP
4362 list_push_back(&rule->facets, &facet->list_node);
4363 facet->rule = rule;
4364 facet->flow = *flow;
b0f7b9b5 4365 list_init(&facet->subfacets);
abe529af
BP
4366 netflow_flow_init(&facet->nf_flow);
4367 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
4368
abe529af
BP
4369 return facet;
4370}
4371
4372static void
4373facet_free(struct facet *facet)
4374{
abe529af
BP
4375 free(facet);
4376}
4377
3d9e05f8 4378/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
0a740f48 4379 * 'odp_actions' on 'packet', which arrived on the flow's in_port. */
3d9e05f8
BP
4380static bool
4381execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
4382 const struct nlattr *odp_actions, size_t actions_len,
4383 struct ofpbuf *packet)
4384{
4385 struct odputil_keybuf keybuf;
4386 struct ofpbuf key;
4387 int error;
4388
6ff686f2 4389 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
4390 odp_flow_key_from_flow(&key, flow,
4391 ofp_port_to_odp_port(ofproto, flow->in_port));
80e5eed9 4392
acf60855 4393 error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
6ff686f2 4394 odp_actions, actions_len, packet);
6ff686f2 4395 return !error;
abe529af
BP
4396}
4397
abe529af
BP
4398/* Remove 'facet' from 'ofproto' and free up the associated memory:
4399 *
4400 * - If 'facet' was installed in the datapath, uninstalls it and updates its
b0f7b9b5 4401 * rule's statistics, via subfacet_uninstall().
abe529af
BP
4402 *
4403 * - Removes 'facet' from its rule and from ofproto->facets.
4404 */
4405static void
15baa734 4406facet_remove(struct facet *facet)
abe529af 4407{
15baa734 4408 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4409 struct subfacet *subfacet, *next_subfacet;
4410
cb22974d 4411 ovs_assert(!list_is_empty(&facet->subfacets));
551a2f6c
BP
4412
4413 /* First uninstall all of the subfacets to get final statistics. */
4414 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
15baa734 4415 subfacet_uninstall(subfacet);
551a2f6c
BP
4416 }
4417
4418 /* Flush the final stats to the rule.
4419 *
4420 * This might require us to have at least one subfacet around so that we
4421 * can use its actions for accounting in facet_account(), which is why we
4422 * have uninstalled but not yet destroyed the subfacets. */
15baa734 4423 facet_flush_stats(facet);
551a2f6c
BP
4424
4425 /* Now we're really all done so destroy everything. */
b0f7b9b5
BP
4426 LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
4427 &facet->subfacets) {
15baa734 4428 subfacet_destroy__(subfacet);
b0f7b9b5 4429 }
abe529af
BP
4430 hmap_remove(&ofproto->facets, &facet->hmap_node);
4431 list_remove(&facet->list_node);
4432 facet_free(facet);
4433}
4434
3de9590b
BP
4435/* Feed information from 'facet' back into the learning table to keep it in
4436 * sync with what is actually flowing through the datapath. */
abe529af 4437static void
3de9590b 4438facet_learn(struct facet *facet)
abe529af 4439{
15baa734 4440 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3de9590b 4441 struct action_xlate_ctx ctx;
abe529af 4442
3de9590b
BP
4443 if (!facet->has_learn
4444 && !facet->has_normal
4445 && (!facet->has_fin_timeout
4446 || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
abe529af
BP
4447 return;
4448 }
abe529af 4449
3de9590b
BP
4450 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
4451 facet->flow.vlan_tci,
4452 facet->rule, facet->tcp_flags, NULL);
4453 ctx.may_learn = true;
f25d0cf3
BP
4454 xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
4455 facet->rule->up.ofpacts_len);
3de9590b
BP
4456}
4457
4458static void
4459facet_account(struct facet *facet)
4460{
4461 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4462 struct subfacet *subfacet;
4463 const struct nlattr *a;
4464 unsigned int left;
4465 ovs_be16 vlan_tci;
4466 uint64_t n_bytes;
abe529af 4467
75a75043 4468 if (!facet->has_normal || !ofproto->has_bonded_bundles) {
abe529af
BP
4469 return;
4470 }
3de9590b 4471 n_bytes = facet->byte_count - facet->accounted_bytes;
d78be13b
BP
4472
4473 /* This loop feeds byte counters to bond_account() for rebalancing to use
4474 * as a basis. We also need to track the actual VLAN on which the packet
4475 * is going to be sent to ensure that it matches the one passed to
4476 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
b95fc6ba
BP
4477 * hash bucket.)
4478 *
4479 * We use the actions from an arbitrary subfacet because they should all
4480 * be equally valid for our purpose. */
4481 subfacet = CONTAINER_OF(list_front(&facet->subfacets),
4482 struct subfacet, list_node);
d78be13b 4483 vlan_tci = facet->flow.vlan_tci;
b95fc6ba
BP
4484 NL_ATTR_FOR_EACH_UNSAFE (a, left,
4485 subfacet->actions, subfacet->actions_len) {
fea393b1 4486 const struct ovs_action_push_vlan *vlan;
d78be13b 4487 struct ofport_dpif *port;
abe529af 4488
d78be13b 4489 switch (nl_attr_type(a)) {
df2c07f4 4490 case OVS_ACTION_ATTR_OUTPUT:
abe529af
BP
4491 port = get_odp_port(ofproto, nl_attr_get_u32(a));
4492 if (port && port->bundle && port->bundle->bond) {
d78be13b 4493 bond_account(port->bundle->bond, &facet->flow,
dc155bff 4494 vlan_tci_to_vid(vlan_tci), n_bytes);
abe529af 4495 }
d78be13b
BP
4496 break;
4497
fea393b1
BP
4498 case OVS_ACTION_ATTR_POP_VLAN:
4499 vlan_tci = htons(0);
d78be13b
BP
4500 break;
4501
fea393b1
BP
4502 case OVS_ACTION_ATTR_PUSH_VLAN:
4503 vlan = nl_attr_get(a);
4504 vlan_tci = vlan->vlan_tci;
d78be13b 4505 break;
abe529af
BP
4506 }
4507 }
4508}
4509
abe529af
BP
4510/* Returns true if the only action for 'facet' is to send to the controller.
4511 * (We don't report NetFlow expiration messages for such facets because they
4512 * are just part of the control logic for the network, not real traffic). */
4513static bool
4514facet_is_controller_flow(struct facet *facet)
4515{
f25d0cf3
BP
4516 if (facet) {
4517 const struct rule *rule = &facet->rule->up;
4518 const struct ofpact *ofpacts = rule->ofpacts;
4519 size_t ofpacts_len = rule->ofpacts_len;
4520
dd30ff28
BP
4521 if (ofpacts_len > 0 &&
4522 ofpacts->type == OFPACT_CONTROLLER &&
f25d0cf3
BP
4523 ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
4524 return true;
4525 }
4526 }
4527 return false;
abe529af
BP
4528}
4529
4530/* Folds all of 'facet''s statistics into its rule. Also updates the
4531 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
4532 * 'facet''s statistics in the datapath should have been zeroed and folded into
4533 * its packet and byte counts before this function is called. */
4534static void
15baa734 4535facet_flush_stats(struct facet *facet)
abe529af 4536{
15baa734 4537 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4538 struct subfacet *subfacet;
4539
4540 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
cb22974d
BP
4541 ovs_assert(!subfacet->dp_byte_count);
4542 ovs_assert(!subfacet->dp_packet_count);
b0f7b9b5 4543 }
abe529af
BP
4544
4545 facet_push_stats(facet);
3de9590b
BP
4546 if (facet->accounted_bytes < facet->byte_count) {
4547 facet_account(facet);
4548 facet->accounted_bytes = facet->byte_count;
4549 }
abe529af
BP
4550
4551 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
4552 struct ofexpired expired;
4553 expired.flow = facet->flow;
4554 expired.packet_count = facet->packet_count;
4555 expired.byte_count = facet->byte_count;
4556 expired.used = facet->used;
4557 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
4558 }
4559
4560 facet->rule->packet_count += facet->packet_count;
4561 facet->rule->byte_count += facet->byte_count;
4562
4563 /* Reset counters to prevent double counting if 'facet' ever gets
4564 * reinstalled. */
bbb5d219 4565 facet_reset_counters(facet);
abe529af
BP
4566
4567 netflow_flow_clear(&facet->nf_flow);
0e553d9c 4568 facet->tcp_flags = 0;
abe529af
BP
4569}
4570
4571/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4572 * Returns it if found, otherwise a null pointer.
4573 *
2b459b83
BP
4574 * 'hash' must be the return value of flow_hash(flow, 0).
4575 *
abe529af
BP
4576 * The returned facet might need revalidation; use facet_lookup_valid()
4577 * instead if that is important. */
4578static struct facet *
2b459b83
BP
4579facet_find(struct ofproto_dpif *ofproto,
4580 const struct flow *flow, uint32_t hash)
abe529af
BP
4581{
4582 struct facet *facet;
4583
2b459b83 4584 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
abe529af
BP
4585 if (flow_equal(flow, &facet->flow)) {
4586 return facet;
4587 }
4588 }
4589
4590 return NULL;
4591}
4592
4593/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4594 * Returns it if found, otherwise a null pointer.
4595 *
2b459b83
BP
4596 * 'hash' must be the return value of flow_hash(flow, 0).
4597 *
abe529af
BP
4598 * The returned facet is guaranteed to be valid. */
4599static struct facet *
2b459b83
BP
4600facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
4601 uint32_t hash)
abe529af 4602{
c57b2226 4603 struct facet *facet;
abe529af 4604
c57b2226 4605 facet = facet_find(ofproto, flow, hash);
abe529af 4606 if (facet
2cc3c58e
EJ
4607 && (ofproto->backer->need_revalidate
4608 || tag_set_intersects(&ofproto->backer->revalidate_set,
4609 facet->tags))) {
c57b2226 4610 facet_revalidate(facet);
abe529af
BP
4611 }
4612
4613 return facet;
4614}
4615
6a7e895f
BP
4616static const char *
4617subfacet_path_to_string(enum subfacet_path path)
4618{
4619 switch (path) {
4620 case SF_NOT_INSTALLED:
4621 return "not installed";
4622 case SF_FAST_PATH:
4623 return "in fast path";
4624 case SF_SLOW_PATH:
4625 return "in slow path";
4626 default:
4627 return "<error>";
4628 }
4629}
4630
4631/* Returns the path in which a subfacet should be installed if its 'slow'
4632 * member has the specified value. */
4633static enum subfacet_path
4634subfacet_want_path(enum slow_path_reason slow)
4635{
4636 return slow ? SF_SLOW_PATH : SF_FAST_PATH;
4637}
4638
4639/* Returns true if 'subfacet' needs to have its datapath flow updated,
4640 * supposing that its actions have been recalculated as 'want_actions' and that
4641 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
4642static bool
4643subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
4644 const struct ofpbuf *want_actions)
4645{
4646 enum subfacet_path want_path = subfacet_want_path(slow);
4647 return (want_path != subfacet->path
4648 || (want_path == SF_FAST_PATH
4649 && (subfacet->actions_len != want_actions->size
4650 || memcmp(subfacet->actions, want_actions->data,
4651 subfacet->actions_len))));
4652}
4653
6814e51f
BP
4654static bool
4655facet_check_consistency(struct facet *facet)
4656{
4657 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
4658
4659 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4660
050ac423
BP
4661 uint64_t odp_actions_stub[1024 / 8];
4662 struct ofpbuf odp_actions;
4663
6814e51f
BP
4664 struct rule_dpif *rule;
4665 struct subfacet *subfacet;
c53e1132 4666 bool may_log = false;
6814e51f
BP
4667 bool ok;
4668
4669 /* Check the rule for consistency. */
c57b2226
BP
4670 rule = rule_dpif_lookup(ofproto, &facet->flow);
4671 ok = rule == facet->rule;
4672 if (!ok) {
c53e1132 4673 may_log = !VLOG_DROP_WARN(&rl);
c53e1132
BP
4674 if (may_log) {
4675 struct ds s;
6814e51f 4676
c53e1132
BP
4677 ds_init(&s);
4678 flow_format(&s, &facet->flow);
4679 ds_put_format(&s, ": facet associated with wrong rule (was "
4680 "table=%"PRIu8",", facet->rule->up.table_id);
4681 cls_rule_format(&facet->rule->up.cr, &s);
4682 ds_put_format(&s, ") (should have been table=%"PRIu8",",
4683 rule->up.table_id);
4684 cls_rule_format(&rule->up.cr, &s);
4685 ds_put_char(&s, ')');
6814e51f 4686
c53e1132
BP
4687 VLOG_WARN("%s", ds_cstr(&s));
4688 ds_destroy(&s);
4689 }
6814e51f
BP
4690 }
4691
4692 /* Check the datapath actions for consistency. */
050ac423 4693 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
6814e51f 4694 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4695 enum subfacet_path want_path;
9616614b 4696 struct odputil_keybuf keybuf;
6814e51f 4697 struct action_xlate_ctx ctx;
9616614b
BP
4698 struct ofpbuf key;
4699 struct ds s;
6814e51f
BP
4700
4701 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 4702 subfacet->initial_tci, rule, 0, NULL);
f25d0cf3 4703 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 4704 &odp_actions);
6814e51f 4705
6a7e895f
BP
4706 if (subfacet->path == SF_NOT_INSTALLED) {
4707 /* This only happens if the datapath reported an error when we
4708 * tried to install the flow. Don't flag another error here. */
4709 continue;
4710 }
4711
4712 want_path = subfacet_want_path(subfacet->slow);
4713 if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
4714 /* The actions for slow-path flows may legitimately vary from one
4715 * packet to the next. We're done. */
050ac423 4716 continue;
6814e51f
BP
4717 }
4718
6a7e895f 4719 if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
9616614b
BP
4720 continue;
4721 }
c53e1132 4722
9616614b
BP
4723 /* Inconsistency! */
4724 if (ok) {
4725 may_log = !VLOG_DROP_WARN(&rl);
4726 ok = false;
4727 }
4728 if (!may_log) {
4729 /* Rate-limited, skip reporting. */
4730 continue;
4731 }
c53e1132 4732
9616614b
BP
4733 ds_init(&s);
4734 subfacet_get_key(subfacet, &keybuf, &key);
4735 odp_flow_key_format(key.data, key.size, &s);
4736
4737 ds_put_cstr(&s, ": inconsistency in subfacet");
6a7e895f 4738 if (want_path != subfacet->path) {
9616614b
BP
4739 enum odp_key_fitness fitness = subfacet->key_fitness;
4740
6a7e895f
BP
4741 ds_put_format(&s, " (%s, fitness=%s)",
4742 subfacet_path_to_string(subfacet->path),
9616614b 4743 odp_key_fitness_to_string(fitness));
6a7e895f
BP
4744 ds_put_format(&s, " (should have been %s)",
4745 subfacet_path_to_string(want_path));
4746 } else if (want_path == SF_FAST_PATH) {
9616614b
BP
4747 ds_put_cstr(&s, " (actions were: ");
4748 format_odp_actions(&s, subfacet->actions,
4749 subfacet->actions_len);
4750 ds_put_cstr(&s, ") (correct actions: ");
4751 format_odp_actions(&s, odp_actions.data, odp_actions.size);
4752 ds_put_char(&s, ')');
4753 } else {
4754 ds_put_cstr(&s, " (actions: ");
4755 format_odp_actions(&s, subfacet->actions,
4756 subfacet->actions_len);
4757 ds_put_char(&s, ')');
6814e51f 4758 }
9616614b
BP
4759 VLOG_WARN("%s", ds_cstr(&s));
4760 ds_destroy(&s);
6814e51f 4761 }
050ac423 4762 ofpbuf_uninit(&odp_actions);
6814e51f
BP
4763
4764 return ok;
4765}
4766
15baa734 4767/* Re-searches the classifier for 'facet':
abe529af
BP
4768 *
4769 * - If the rule found is different from 'facet''s current rule, moves
4770 * 'facet' to the new rule and recompiles its actions.
4771 *
4772 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
c57b2226
BP
4773 * where it is and recompiles its actions anyway. */
4774static void
15baa734 4775facet_revalidate(struct facet *facet)
abe529af 4776{
15baa734 4777 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b95fc6ba
BP
4778 struct actions {
4779 struct nlattr *odp_actions;
4780 size_t actions_len;
4781 };
4782 struct actions *new_actions;
4783
abe529af 4784 struct action_xlate_ctx ctx;
050ac423
BP
4785 uint64_t odp_actions_stub[1024 / 8];
4786 struct ofpbuf odp_actions;
4787
abe529af 4788 struct rule_dpif *new_rule;
b0f7b9b5 4789 struct subfacet *subfacet;
b95fc6ba 4790 int i;
abe529af
BP
4791
4792 COVERAGE_INC(facet_revalidate);
4793
c57b2226 4794 new_rule = rule_dpif_lookup(ofproto, &facet->flow);
abe529af 4795
df2c07f4 4796 /* Calculate new datapath actions.
abe529af
BP
4797 *
4798 * We do not modify any 'facet' state yet, because we might need to, e.g.,
4799 * emit a NetFlow expiration and, if so, we need to have the old state
4800 * around to properly compose it. */
abe529af 4801
df2c07f4
JP
4802 /* If the datapath actions changed or the installability changed,
4803 * then we need to talk to the datapath. */
b95fc6ba
BP
4804 i = 0;
4805 new_actions = NULL;
4806 memset(&ctx, 0, sizeof ctx);
050ac423 4807 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
b0f7b9b5 4808 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4809 enum slow_path_reason slow;
b95fc6ba 4810
e84173dc 4811 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 4812 subfacet->initial_tci, new_rule, 0, NULL);
f25d0cf3 4813 xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
050ac423 4814 &odp_actions);
b0f7b9b5 4815
6a7e895f
BP
4816 slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
4817 if (subfacet_should_install(subfacet, slow, &odp_actions)) {
4818 struct dpif_flow_stats stats;
4819
4820 subfacet_install(subfacet,
4821 odp_actions.data, odp_actions.size, &stats, slow);
            subfacet_update_stats(subfacet, &stats);

            if (!new_actions) {
                new_actions = xcalloc(list_size(&facet->subfacets),
                                      sizeof *new_actions);
            }
            new_actions[i].odp_actions = xmemdup(odp_actions.data,
                                                 odp_actions.size);
            new_actions[i].actions_len = odp_actions.size;
        }

        i++;
    }
    ofpbuf_uninit(&odp_actions);

    if (new_actions) {
        facet_flush_stats(facet);
    }

    /* Update 'facet' now that we've taken care of all the old state. */
    facet->tags = ctx.tags;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->mirrors = ctx.mirrors;

    i = 0;
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;

        if (new_actions && new_actions[i].odp_actions) {
            free(subfacet->actions);
            subfacet->actions = new_actions[i].odp_actions;
            subfacet->actions_len = new_actions[i].actions_len;
        }
        i++;
    }
    free(new_actions);

    if (facet->rule != new_rule) {
        COVERAGE_INC(facet_changed_rule);
        list_remove(&facet->list_node);
        list_push_back(&new_rule->facets, &facet->list_node);
        facet->rule = new_rule;
        facet->used = new_rule->up.created;
        facet->prev_used = facet->used;
    }
}

/* Updates 'facet''s used time.  Caller is responsible for calling
 * facet_push_stats() to update the flows which 'facet' resubmits into. */
static void
facet_update_time(struct facet *facet, long long int used)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    if (used > facet->used) {
        facet->used = used;
        ofproto_rule_update_used(&facet->rule->up, used);
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
    }
}

static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}

static void
facet_push_stats(struct facet *facet)
{
    struct dpif_flow_stats stats;

    ovs_assert(facet->packet_count >= facet->prev_packet_count);
    ovs_assert(facet->byte_count >= facet->prev_byte_count);
    ovs_assert(facet->used >= facet->prev_used);

    stats.n_packets = facet->packet_count - facet->prev_packet_count;
    stats.n_bytes = facet->byte_count - facet->prev_byte_count;
    stats.used = facet->used;
    stats.tcp_flags = 0;

    if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        flow_push_stats(facet->rule, &facet->flow, &stats);

        update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
                            facet->mirrors, stats.n_packets, stats.n_bytes);
    }
}

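/* Illustrative note (not part of the original file): facet_push_stats() works
 * on deltas.  If a facet has packet_count 10 and prev_packet_count 7, the
 * push credits 3 packets to the rules the facet resubmits into and then
 * advances prev_packet_count to 10, so the same traffic is never pushed
 * twice:
 *
 *     stats.n_packets = 10 - 7;        // 3 new packets
 *     flow_push_stats(facet->rule, &facet->flow, &stats);
 *     facet->prev_packet_count = 10;   // those 3 are now consumed
 */
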
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
    rule->packet_count += stats->n_packets;
    rule->byte_count += stats->n_bytes;
    ofproto_rule_update_used(&rule->up, stats->used);
}

/* Pushes flow statistics to the rules which 'flow' resubmits into given
 * 'rule''s actions and mirrors. */
static void
flow_push_stats(struct rule_dpif *rule,
                const struct flow *flow, const struct dpif_flow_stats *stats)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;

    ofproto_rule_update_used(&rule->up, stats->used);

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
                          0, NULL);
    ctx.resubmit_stats = stats;
    xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
                                   rule->up.ofpacts_len);
}
\f
/* Subfacets. */

static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
              const struct nlattr *key, size_t key_len, uint32_t key_hash,
              const struct flow *flow)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &ofproto->subfacets) {
        if (subfacet->key
            ? (subfacet->key_len == key_len
               && !memcmp(key, subfacet->key, key_len))
            : flow_equal(flow, &subfacet->facet->flow)) {
            return subfacet;
        }
    }

    return NULL;
}

/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
 * 'key_fitness', 'key', and 'key_len' members in 'miss'.  Returns the
 * existing subfacet if there is one, otherwise creates and returns a
 * new subfacet.
 *
 * If the returned subfacet is new, then subfacet->actions will be NULL, in
 * which case the caller must populate the actions with
 * subfacet_make_actions(). */
static struct subfacet *
subfacet_create(struct facet *facet, struct flow_miss *miss,
                long long int now)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum odp_key_fitness key_fitness = miss->key_fitness;
    const struct nlattr *key = miss->key;
    size_t key_len = miss->key_len;
    uint32_t key_hash;
    struct subfacet *subfacet;

    key_hash = odp_flow_key_hash(key, key_len);

    if (list_is_empty(&facet->subfacets)) {
        subfacet = &facet->one_subfacet;
    } else {
        subfacet = subfacet_find(ofproto, key, key_len, key_hash,
                                 &facet->flow);
        if (subfacet) {
            if (subfacet->facet == facet) {
                return subfacet;
            }

            /* This shouldn't happen. */
            VLOG_ERR_RL(&rl, "subfacet with wrong facet");
            subfacet_destroy(subfacet);
        }

        subfacet = xmalloc(sizeof *subfacet);
    }

    hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->key_fitness = key_fitness;
    if (key_fitness != ODP_FIT_PERFECT) {
        subfacet->key = xmemdup(key, key_len);
        subfacet->key_len = key_len;
    } else {
        subfacet->key = NULL;
        subfacet->key_len = 0;
    }
    subfacet->used = now;
    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
    subfacet->actions_len = 0;
    subfacet->actions = NULL;
    subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
                      ? SLOW_MATCH
                      : 0);
    subfacet->path = SF_NOT_INSTALLED;
    subfacet->initial_tci = miss->initial_tci;
    subfacet->odp_in_port = miss->odp_in_port;

    return subfacet;
}

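/* Usage sketch (hypothetical caller, not part of the original file): a flow
 * miss handler that already holds the facet for 'miss' attaches a subfacet
 * keyed on the datapath's own flow representation, then fills in the actions
 * if the subfacet is new (recognizable by its NULL 'actions', per the comment
 * above).  'packet' and 'odp_actions' are assumed to be in scope:
 *
 *     struct subfacet *subfacet = subfacet_create(facet, miss, now);
 *     if (!subfacet->actions) {
 *         subfacet_make_actions(subfacet, packet, &odp_actions);
 *     }
 */
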
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    subfacet_uninstall(subfacet);
    hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    free(subfacet->actions);
    if (subfacet != &facet->one_subfacet) {
        free(subfacet);
    }
}

/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(facet);
    } else {
        subfacet_destroy__(subfacet);
    }
}

static void
subfacet_destroy_batch(struct ofproto_dpif *ofproto,
                       struct subfacet **subfacets, int n)
{
    struct odputil_keybuf keybufs[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
    struct ofpbuf keys[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
    int i;

    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
        ops[i].u.flow_del.key = keys[i].data;
        ops[i].u.flow_del.key_len = keys[i].size;
        ops[i].u.flow_del.stats = &stats[i];
        opsp[i] = &ops[i];
    }

    dpif_operate(ofproto->backer->dpif, opsp, n);
    for (i = 0; i < n; i++) {
        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
        subfacets[i]->path = SF_NOT_INSTALLED;
        subfacet_destroy(subfacets[i]);
    }
}

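/* Illustration (hypothetical caller, not part of the original file): the
 * point of the batch variant is to amortize the datapath round trip, so an
 * expiration sweep would accumulate up to SUBFACET_DESTROY_MAX_BATCH doomed
 * subfacets and flush them with a single dpif_operate() call:
 *
 *     struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
 *     int n = 0;
 *     ...
 *     batch[n++] = subfacet;
 *     if (n == SUBFACET_DESTROY_MAX_BATCH) {
 *         subfacet_destroy_batch(ofproto, batch, n);
 *         n = 0;
 *     }
 */
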
/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
 * that can be used to refer to 'subfacet'.  The caller must provide 'keybuf'
 * for use as temporary storage. */
static void
subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
                 struct ofpbuf *key)
{
    if (!subfacet->key) {
        struct flow *flow = &subfacet->facet->flow;

        ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
        odp_flow_key_from_flow(key, flow, subfacet->odp_in_port);
    } else {
        ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
    }
}

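/* Example use (a sketch, mirroring subfacet_uninstall() below): the caller
 * owns the temporary key storage, so the pattern is always stack-based and
 * 'key' may end up pointing either into 'keybuf' or at subfacet->key:
 *
 *     struct odputil_keybuf keybuf;
 *     struct ofpbuf key;
 *
 *     subfacet_get_key(subfacet, &keybuf, &key);
 *     dpif_flow_del(ofproto->backer->dpif, key.data, key.size, &stats);
 */
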
/* Composes the datapath actions for 'subfacet' based on its rule's actions.
 * Translates the actions into 'odp_actions', which the caller must have
 * initialized and is responsible for uninitializing. */
static void
subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
                      struct ofpbuf *odp_actions)
{
    struct facet *facet = subfacet->facet;
    struct rule_dpif *rule = facet->rule;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
                          rule, 0, packet);
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
    facet->tags = ctx.tags;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->mirrors = ctx.mirrors;

    subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
    if (subfacet->actions_len != odp_actions->size
        || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
        free(subfacet->actions);
        subfacet->actions_len = odp_actions->size;
        subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
    }
}

/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
 * in the datapath will be zeroed and 'stats' will be updated with traffic new
 * since 'subfacet' was last updated.
 *
 * Returns 0 if successful, otherwise a positive errno value. */
static int
subfacet_install(struct subfacet *subfacet,
                 const struct nlattr *actions, size_t actions_len,
                 struct dpif_flow_stats *stats,
                 enum slow_path_reason slow)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path path = subfacet_want_path(slow);
    uint64_t slow_path_stub[128 / 8];
    struct odputil_keybuf keybuf;
    enum dpif_flow_put_flags flags;
    struct ofpbuf key;
    int ret;

    flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
    if (stats) {
        flags |= DPIF_FP_ZERO_STATS;
    }

    if (path == SF_SLOW_PATH) {
        compose_slow_path(ofproto, &facet->flow, slow,
                          slow_path_stub, sizeof slow_path_stub,
                          &actions, &actions_len);
    }

    subfacet_get_key(subfacet, &keybuf, &key);
    ret = dpif_flow_put(ofproto->backer->dpif, flags, key.data, key.size,
                        actions, actions_len, stats);

    if (stats) {
        subfacet_reset_dp_stats(subfacet, stats);
    }

    if (!ret) {
        subfacet->path = path;
    }
    return ret;
}

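/* Usage sketch (not part of the original file; error handling abbreviated):
 * passing a non-null 'stats' zeroes the datapath counters, so the returned
 * numbers must be folded back into the subfacet or they are lost.  This is
 * the pattern facet_revalidate() uses above:
 *
 *     struct dpif_flow_stats stats;
 *     int error = subfacet_install(subfacet, actions, actions_len,
 *                                  &stats, subfacet->slow);
 *     if (!error) {
 *         subfacet_update_stats(subfacet, &stats);
 *     }
 */
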
static int
subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
{
    return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
                            stats, subfacet->slow);
}

/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
{
    if (subfacet->path != SF_NOT_INSTALLED) {
        struct rule_dpif *rule = subfacet->facet->rule;
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
        struct odputil_keybuf keybuf;
        struct dpif_flow_stats stats;
        struct ofpbuf key;
        int error;

        subfacet_get_key(subfacet, &keybuf, &key);
        error = dpif_flow_del(ofproto->backer->dpif,
                              key.data, key.size, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(subfacet, &stats);
        }
        subfacet->path = SF_NOT_INSTALLED;
    } else {
        ovs_assert(subfacet->dp_packet_count == 0);
        ovs_assert(subfacet->dp_byte_count == 0);
    }
}

/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}

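/* Worked example (illustrative only): if the datapath reports 12 packets for
 * a flow and subfacet->dp_packet_count is 9, then 9 of those packets were
 * already folded in on an earlier update, so this function rewrites
 * stats->n_packets to 12 - 9 = 3 and re-arms the dp_* counters at zero for
 * the next round.  If the reported totals are smaller than the cached ones
 * (the datapath was reset underneath us), 'stats' is left alone. */
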
/* Updates 'subfacet''s used time.  The caller is responsible for calling
 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
static void
subfacet_update_time(struct subfacet *subfacet, long long int used)
{
    if (used > subfacet->used) {
        subfacet->used = used;
        facet_update_time(subfacet->facet, used);
    }
}

/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet_update_time(subfacet, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet->tcp_flags |= stats->tcp_flags;
        facet_push_stats(facet);
        netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
    }
}
\f
/* Rules. */

static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct rule_dpif *rule;

    rule = rule_dpif_lookup__(ofproto, flow, 0);
    if (rule) {
        return rule;
    }

    return rule_dpif_miss_rule(ofproto, flow);
}

static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id].cls;
    if (flow->nw_frag & FLOW_NW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_FRAG_NORMAL frag_handling, we must pretend that transport
         * ports are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }
    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}

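/* Usage sketch (hypothetical, not part of the original file): callers that
 * must distinguish "no match" from the built-in miss behavior call the
 * two-underscore internal version directly, e.g. for the hidden internal
 * table:
 *
 *     struct rule_dpif *rule = rule_dpif_lookup__(ofproto, flow, TBL_INTERNAL);
 *     if (!rule) {
 *         // no hidden rule installed for this flow
 *     }
 *
 * whereas rule_dpif_lookup() above never returns NULL: on a miss it
 * substitutes the rule chosen by rule_dpif_miss_rule() below. */
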
static struct rule_dpif *
rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct ofport_dpif *port;

    port = get_ofp_port(ofproto, flow->in_port);
    if (!port) {
        VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
        return ofproto->miss_rule;
    }

    if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
        return ofproto->no_packet_in_rule;
    }
    return ofproto->miss_rule;
}

static void
complete_operation(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    rule_invalidate(rule);
    if (clogged) {
        struct dpif_completion *c = xmalloc(sizeof *c);
        c->op = rule->up.pending;
        list_push_back(&ofproto->completions, &c->list_node);
    } else {
        ofoperation_complete(rule->up.pending, 0);
    }
}

static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xmalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}

static enum ofperr
rule_construct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct rule_dpif *victim;
    uint8_t table_id;

    rule->packet_count = 0;
    rule->byte_count = 0;

    victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
    if (victim && !list_is_empty(&victim->facets)) {
        struct facet *facet;

        rule->facets = victim->facets;
        list_moved(&rule->facets);
        LIST_FOR_EACH (facet, list_node, &rule->facets) {
            /* XXX: We're only clearing our local counters here.  It's
             * possible that quite a few packets are unaccounted for in the
             * datapath statistics.  These will be accounted to the new rule
             * instead of cleared as required.  This could be fixed by
             * clearing out the datapath statistics for this facet, but
             * currently it doesn't seem worth it. */
            facet_reset_counters(facet);
            facet->rule = rule;
        }
    } else {
        /* Must avoid list_moved() in this case. */
        list_init(&rule->facets);
    }

    table_id = rule->up.table_id;
    if (victim) {
        rule->tag = victim->tag;
    } else if (table_id == 0) {
        rule->tag = 0;
    } else {
        struct flow flow;

        miniflow_expand(&rule->up.cr.match.flow, &flow);
        rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
                                       ofproto->tables[table_id].basis);
    }

    complete_operation(rule);
    return 0;
}

static void
rule_destruct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet, *next_facet;

    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(facet);
    }

    complete_operation(rule);
}

static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;

    /* Add any statistics that are tracked by facets.  This includes
     * statistical data recently updated by ofproto_update_stats() as well as
     * stats for packets that were executed "by hand" via dpif_execute(). */
    LIST_FOR_EACH (facet, list_node, &rule->facets) {
        *packets += facet->packet_count;
        *bytes += facet->byte_count;
    }
}

static void
rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
                  struct ofpbuf *packet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct dpif_flow_stats stats;
    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
    rule_credit_stats(rule, &stats);

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
                          rule, stats.tcp_flags, packet);
    ctx.resubmit_stats = &stats;
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);

    execute_odp_actions(ofproto, flow, odp_actions.data,
                        odp_actions.size, packet);

    ofpbuf_uninit(&odp_actions);
}

static enum ofperr
rule_execute(struct rule *rule, const struct flow *flow,
             struct ofpbuf *packet)
{
    rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
    ofpbuf_delete(packet);
    return 0;
}

static void
rule_modify_actions(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    complete_operation(rule);
}
\f
/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf key, odp_actions;
    struct odputil_keybuf keybuf;
    uint32_t odp_port;
    struct flow flow;
    int error;

    flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofproto_dpif *peer_ofproto;
        struct dpif_flow_stats stats;
        struct ofport_dpif *peer;
        struct rule_dpif *rule;

        peer = ofport_get_peer(ofport);
        if (!peer) {
            return ENODEV;
        }

        dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
        netdev_vport_inc_tx(ofport->up.netdev, &stats);
        netdev_vport_inc_rx(peer->up.netdev, &stats);

        flow.in_port = peer->up.ofp_port;
        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        rule = rule_dpif_lookup(peer_ofproto, &flow);
        rule_dpif_execute(rule, &flow, packet);

        return 0;
    }

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);

    if (ofport->tnl_port) {
        struct dpif_flow_stats stats;

        odp_port = tnl_port_send(ofport->tnl_port, &flow);
        if (odp_port == OVSP_NONE) {
            return ENODEV;
        }

        dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
        netdev_vport_inc_tx(ofport->up.netdev, &stats);
        odp_put_tunnel_action(&flow.tunnel, &odp_actions);
        odp_put_skb_mark_action(flow.skb_mark, &odp_actions);
    } else {
        odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
                                          flow.vlan_tci);
        if (odp_port != ofport->odp_port) {
            eth_pop_vlan(packet);
            flow.vlan_tci = htons(0);
        }
    }

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, &flow,
                           ofp_port_to_odp_port(ofproto, flow.in_port));

    compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);

    nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
    error = dpif_execute(ofproto->backer->dpif,
                         key.data, key.size,
                         odp_actions.data, odp_actions.size,
                         packet);
    ofpbuf_uninit(&odp_actions);

    if (error) {
        VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
                     ofproto->up.name, odp_port, strerror(error));
    }
    ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
    return error;
}
\f
/* OpenFlow to datapath action translation. */

static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);

/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
 * The action will state 'slow' as the reason that the action is in the slow
 * path.  (This is purely informational: it allows a human viewing "ovs-dpctl
 * dump-flows" output to see why a flow is in the slow path.)
 *
 * The 'stub_size' bytes in 'stub' will be used to store the action.
 * 'stub_size' must be large enough for the action.
 *
 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
 * respectively. */
static void
compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
                  enum slow_path_reason slow,
                  uint64_t *stub, size_t stub_size,
                  const struct nlattr **actionsp, size_t *actions_lenp)
{
    union user_action_cookie cookie;
    struct ofpbuf buf;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = slow;

    ofpbuf_use_stack(&buf, stub, stub_size);
    if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
        uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
        odp_put_userspace_action(pid, &cookie, sizeof cookie, &buf);
    } else {
        put_userspace_action(ofproto, &buf, flow, &cookie);
    }
    *actionsp = buf.data;
    *actions_lenp = buf.size;
}

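/* Usage sketch (mirroring the call in subfacet_install() above): the stub
 * must outlive the returned pointers, since '*actionsp' ends up pointing
 * into it:
 *
 *     uint64_t stub[128 / 8];
 *     const struct nlattr *acts;
 *     size_t acts_len;
 *
 *     compose_slow_path(ofproto, &facet->flow, SLOW_CFM,
 *                       stub, sizeof stub, &acts, &acts_len);
 */
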
static size_t
put_userspace_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     const union user_action_cookie *cookie)
{
    uint32_t pid;

    pid = dpif_port_get_pid(ofproto->backer->dpif,
                            ofp_port_to_odp_port(ofproto, flow->in_port));

    return odp_put_userspace_action(pid, cookie, sizeof *cookie, odp_actions);
}

static void
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}

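/* Encoding examples (informative, derived from the switch above):
 *
 *     n_outputs == 0             -> output = 0x40000100  (dropped, reason unknown)
 *     n_outputs == 1, ifindex 7  -> output = 7           (single egress ifindex)
 *     n_outputs == 3             -> output = 0x80000003  (multiple ports + count)
 */
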
/* Composes a SAMPLE action for sFlow. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;
    size_t sample_offset, actions_offset;
    int cookie_offset;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    /* Number of packets out of UINT_MAX to sample. */
    probability = dpif_sflow_get_probability(ofproto->sflow);
    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

/* The SAMPLE action must be the first action in any given list of actions.
 * At this point we do not yet have all of the information required to build
 * it, so we build the action as completely as possible now and fix it up
 * later with fix_sflow_action(). */
static void
add_sflow_action(struct action_xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   ctx->odp_actions,
                                                   &ctx->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* Fixes up the SAMPLE action according to data collected while composing ODP
 * actions, i.e. rewrites the nested USERSPACE action's user-cookie (inside
 * OVS_SAMPLE_ATTR_ACTIONS), which sFlow requires. */
static void
fix_sflow_action(struct action_xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                       sizeof(*cookie));
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}

static void
compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
    ovs_be64 flow_tun_id = ctx->flow.tunnel.tun_id;
    uint8_t flow_nw_tos = ctx->flow.nw_tos;
    struct priority_to_dscp *pdscp;
    uint32_t out_port, odp_port;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 19);

    if (!ofport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofport_dpif *peer = ofport_get_peer(ofport);
        struct flow old_flow = ctx->flow;
        const struct ofproto_dpif *peer_ofproto;
        enum slow_path_reason special;
        struct ofport_dpif *in_port;

        if (!peer) {
            xlate_report(ctx, "Nonexistent patch port peer");
            return;
        }

        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        if (peer_ofproto->backer != ctx->ofproto->backer) {
            xlate_report(ctx, "Patch port peer on a different datapath");
            return;
        }

        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
        ctx->flow.in_port = peer->up.ofp_port;
        ctx->flow.metadata = htonll(0);
        memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
        memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);

        in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
        special = process_special(ctx->ofproto, &ctx->flow, in_port,
                                  ctx->packet);
        if (special) {
            ctx->slow |= special;
        } else if (!in_port || may_receive(in_port, ctx)) {
            if (!in_port || stp_forward_in_state(in_port->stp_state)) {
                xlate_table_action(ctx, ctx->flow.in_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->odp_actions->size;
                xlate_table_action(ctx, ctx->flow.in_port, 0, true);
                ctx->base_flow = old_base_flow;
                ctx->odp_actions->size = old_size;
            }
        }

        ctx->flow = old_flow;
        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ctx->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
            netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
        }

        return;
    }

    pdscp = get_priority(ofport, ctx->flow.skb_priority);
    if (pdscp) {
        ctx->flow.nw_tos &= ~IP_DSCP_MASK;
        ctx->flow.nw_tos |= pdscp->dscp;
    }

    odp_port = ofp_port_to_odp_port(ctx->ofproto, ofp_port);
    if (ofport->tnl_port) {
        odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
        if (odp_port == OVSP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            return;
        }

        if (ctx->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
                                 ctx->odp_actions);
    } else {
        out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
                                          ctx->flow.vlan_tci);
        if (out_port != odp_port) {
            ctx->flow.vlan_tci = htons(0);
        }
    }
    commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->nf_output_iface = ofp_port;
    ctx->flow.tunnel.tun_id = flow_tun_id;
    ctx->flow.vlan_tci = flow_vlan_tci;
    ctx->flow.nw_tos = flow_nw_tos;
}

static void
compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}

static void
xlate_table_action(struct action_xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct ofproto_dpif *ofproto = ctx->ofproto;
        struct rule_dpif *rule;
        uint16_t old_in_port;
        uint8_t old_table_id;

        old_table_id = ctx->table_id;
        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        old_in_port = ctx->flow.in_port;
        ctx->flow.in_port = in_port;
        rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);

        /* Tag the flow. */
        if (table_id > 0 && table_id < N_TABLES) {
            struct table_dpif *table = &ofproto->tables[table_id];
            if (table->other_table) {
                ctx->tags |= (rule && rule->tag
                              ? rule->tag
                              : rule_calculate_tag(&ctx->flow,
                                                   &table->other_table->mask,
                                                   table->basis));
            }
        }

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->flow.in_port = old_in_port;

        if (ctx->resubmit_hook) {
            ctx->resubmit_hook(ctx, rule);
        }

        if (rule == NULL && may_packet_in) {
            /* XXX: We should consult the table configuration flags here:
             * OFPTC_TABLE_MISS_CONTROLLER (the default),
             * OFPTC_TABLE_MISS_CONTINUE, or OFPTC_TABLE_MISS_DROP.
             * OpenFlow 1.0 behaves as OFPTC_TABLE_MISS_CONTINUE, so it is
             * unclear what to do in that case. */
            rule = rule_dpif_miss_rule(ofproto, &ctx->flow);
        }

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            if (ctx->resubmit_stats) {
                rule_credit_stats(rule, ctx->resubmit_stats);
            }

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}

static void
xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}

static void
flood_packets(struct action_xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->nf_output_iface = NF_OUT_FLOOD;
}

static void
execute_controller_action(struct action_xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;

    ctx->slow |= SLOW_CONTROLLER;
    if (!ctx->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->packet);

    if (packet->l2 && packet->l3) {
        struct eth_header *eh;
        uint16_t mpls_depth;

        eth_pop_vlan(packet);
        eh = packet->l2;

        memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
        memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);

        if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
            eth_push_vlan(packet, ctx->flow.vlan_tci);
        }

        mpls_depth = eth_mpls_depth(packet);

        if (mpls_depth < ctx->flow.mpls_depth) {
            push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
        } else if (mpls_depth > ctx->flow.mpls_depth) {
            pop_mpls(packet, ctx->flow.dl_type);
        } else if (mpls_depth) {
            set_mpls_lse(packet, ctx->flow.mpls_lse);
        }

        if (packet->l4) {
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
                                ctx->flow.nw_tos, ctx->flow.nw_ttl);
            }

            if (packet->l7) {
                if (ctx->flow.nw_proto == IPPROTO_TCP) {
                    packet_set_tcp_port(packet, ctx->flow.tp_src,
                                        ctx->flow.tp_dst);
                } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
                    packet_set_udp_port(packet, ctx->flow.tp_src,
                                        ctx->flow.tp_dst);
                }
            }
        }
    }

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
}

static void
execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(eth_type));

    if (ctx->base_flow.mpls_depth) {
        ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
        ctx->flow.mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPv6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPv4 Explicit Null. */
        }
        tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
        ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
        ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        ctx->flow.encap_dl_type = ctx->flow.dl_type;
        ctx->flow.mpls_depth = 1;
    }
    ctx->flow.dl_type = eth_type;
}

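/* Worked example (illustrative, not part of the original file): pushing onto
 * a plain IPv4 packet with nw_tos 0xb8 and nw_ttl 64 synthesizes the initial
 * label stack entry from the IP header: label 0 (IPv4 Explicit Null),
 * tc = (0xb8 & IP_DSCP_MASK) >> 2 = 0x2e, ttl 64, bottom-of-stack 1.  A
 * second push onto an existing stack instead clears BOS on the copied top
 * entry and just grows the depth. */
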
static void
execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(ctx->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    if (ctx->flow.mpls_depth) {
        ctx->flow.mpls_depth--;
        ctx->flow.mpls_lse = htonl(0);
        if (!ctx->flow.mpls_depth) {
            ctx->flow.dl_type = eth_type;
            ctx->flow.encap_dl_type = htons(0);
        }
    }
}

static bool
compose_dec_ttl(struct action_xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
        ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
        return false;
    }

    if (ctx->flow.nw_ttl > 1) {
        ctx->flow.nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}

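/* Behavior sketch (informative, not part of the original file): for an IPv4
 * packet with nw_ttl 1, the loop above sends one OFPR_INVALID_TTL packet-in
 * per controller id carried by the dec_ttl action, e.g. ids->cnt_ids =
 * {0, 7} yields two packet-ins.  The 'true' return value then makes
 * do_xlate_actions() below abandon the rest of the action list (its
 * 'goto out'). */
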
static void
xlate_output_action(struct action_xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len, bool may_packet_in)
{
    uint16_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->flow.in_port, 0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->flow.in_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
    if (port <= UINT16_MAX) {
        xlate_output_action(ctx, port, or->max_len, false);
    }
}

static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                   queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->flow.in_port;
    } else if (ofp_port == ctx->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->flow.skb_priority;
    ctx->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                queue_id, &skb_priority)) {
        ctx->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

struct xlate_reg_state {
    ovs_be16 vlan_tci;
    ovs_be64 tun_id;
};

static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}

static void
xlate_bundle_action(struct action_xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

static void
xlate_learn_action(struct action_xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}

/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}

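/* Examples (informative, not part of the original file):
 * reduce_timeout(10, &t) leaves t == 5 unchanged, clamps t == 30 down to 10,
 * and converts t == 0 ("infinite") to 10; with max == 0 it never changes
 * anything.  xlate_fin_timeout() below relies on this so that a FIN or RST
 * can only ever shorten a rule's timeouts, never extend them. */
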
static void
xlate_fin_timeout(struct action_xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}

static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}

static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct action_xlate_ctx *ctx)
{
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT)
                                        | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            ctx->flow.vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD (QinQ) */
            ctx->flow.vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DST:
            ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            break;

        case OFPACT_SET_L4_DST_PORT:
            ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            ctx->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
            break;

        case OFPACT_PUSH_MPLS:
            execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, ofpact_get_LEARN(a));
            }
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            ctx->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX: Nothing to do because write-actions is not supported for
             * now.  When write-actions is supported, clear-actions must be
             * supported at the same time. */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            ctx->flow.metadata &= ~metadata->mask;
            ctx->flow.metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_GOTO_TABLE: {
            /* XXX remove recursion */
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            ovs_assert(ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->flow.in_port, ogt->table_id, true);
            break;
        }
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}

6514static void
6515action_xlate_ctx_init(struct action_xlate_ctx *ctx,
6516 struct ofproto_dpif *ofproto, const struct flow *flow,
18b2a258 6517 ovs_be16 initial_tci, struct rule_dpif *rule,
0e553d9c 6518 uint8_t tcp_flags, const struct ofpbuf *packet)
abe529af 6519{
ef506a7c
JG
6520 ovs_be64 initial_tun_id = flow->tunnel.tun_id;
6521
6522 /* Flow initialization rules:
6523 * - 'base_flow' must match the kernel's view of the packet at the
6524 * time that action processing starts. 'flow' represents any
6525 * transformations we wish to make through actions.
6526 * - By default 'base_flow' and 'flow' are the same since the input
6527 * packet matches the output before any actions are applied.
6528 * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
6529 * of the received packet as seen by the kernel. If we later output
6530 * to another device without any modifications this will cause us to
6531 * insert a new tag since the original one was stripped off by the
6532 * VLAN device.
6533 * - Tunnel 'flow' is largely cleared when transitioning between
6534 * the input and output stages since it does not make sense to output
6535 * a packet with the exact headers that it was received with (i.e.
6536 * the destination IP is us). The one exception is the tun_id, which
6537 * is preserved to allow use in later resubmit lookups and loads into
6538 * registers.
6539 * - Tunnel 'base_flow' is completely cleared since that is what the
6540 * kernel does. If we wish to maintain the original values an action
6541 * needs to be generated. */
6542
abe529af
BP
6543 ctx->ofproto = ofproto;
6544 ctx->flow = *flow;
47d4a9db 6545 memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
e84173dc 6546 ctx->base_flow = ctx->flow;
e84173dc 6547 ctx->base_flow.vlan_tci = initial_tci;
ef506a7c 6548 ctx->flow.tunnel.tun_id = initial_tun_id;
18b2a258 6549 ctx->rule = rule;
abe529af 6550 ctx->packet = packet;
3de9590b 6551 ctx->may_learn = packet != NULL;
0e553d9c 6552 ctx->tcp_flags = tcp_flags;
abe529af 6553 ctx->resubmit_hook = NULL;
479df176 6554 ctx->report_hook = NULL;
112bc5f4 6555 ctx->resubmit_stats = NULL;
abe529af
BP
6556}
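
/* Example (a sketch, not part of the original flow of this file): a typical
 * caller pairs action_xlate_ctx_init() with xlate_actions(), the same way
 * packet_out() and ofproto_trace() below do:
 *
 *     uint64_t stub[1024 / 8];
 *     struct ofpbuf odp_actions;
 *     struct action_xlate_ctx ctx;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, flow.vlan_tci,
 *                           NULL, 0, packet);
 *     ofpbuf_use_stub(&odp_actions, stub, sizeof stub);
 *     xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
 *     ...odp_actions.data and odp_actions.size now hold datapath actions...
 *     ofpbuf_uninit(&odp_actions);
 */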
6557
f25d0cf3
BP
 6558/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
6559 * into datapath actions in 'odp_actions', using 'ctx'. */
050ac423 6560static void
abe529af 6561xlate_actions(struct action_xlate_ctx *ctx,
f25d0cf3 6562 const struct ofpact *ofpacts, size_t ofpacts_len,
050ac423 6563 struct ofpbuf *odp_actions)
abe529af 6564{
43d50bc8
BP
6565 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
6566 * that in the future we always keep a copy of the original flow for
6567 * tracing purposes. */
6568 static bool hit_resubmit_limit;
6569
6a7e895f 6570 enum slow_path_reason special;
ffaef958 6571 struct ofport_dpif *in_port;
9ba85077 6572 struct flow orig_flow;
6a7e895f 6573
abe529af
BP
6574 COVERAGE_INC(ofproto_dpif_xlate);
6575
050ac423
BP
6576 ofpbuf_clear(odp_actions);
6577 ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
6578
6579 ctx->odp_actions = odp_actions;
97e42c92 6580 ctx->tags = 0;
6a7e895f 6581 ctx->slow = 0;
97e42c92
BP
6582 ctx->has_learn = false;
6583 ctx->has_normal = false;
0e553d9c 6584 ctx->has_fin_timeout = false;
97e42c92 6585 ctx->nf_output_iface = NF_OUT_DROP;
9d24de3b 6586 ctx->mirrors = 0;
97e42c92 6587 ctx->recurse = 0;
6a6455e5 6588 ctx->max_resubmit_trigger = false;
deedf7e7 6589 ctx->orig_skb_priority = ctx->flow.skb_priority;
97e42c92 6590 ctx->table_id = 0;
848e8809 6591 ctx->exit = false;
7257b535 6592
43d50bc8 6593 if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
ccb7c863 6594 /* Do this conditionally because the copy is expensive enough that it
9ba85077
BP
6595 * shows up in profiles. */
6596 orig_flow = ctx->flow;
ccb7c863
BP
6597 }
6598
eadef313 6599 if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
7257b535
BP
6600 switch (ctx->ofproto->up.frag_handling) {
6601 case OFPC_FRAG_NORMAL:
6602 /* We must pretend that transport ports are unavailable. */
97e42c92
BP
6603 ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
6604 ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
7257b535
BP
6605 break;
6606
6607 case OFPC_FRAG_DROP:
050ac423 6608 return;
7257b535
BP
6609
6610 case OFPC_FRAG_REASM:
6611 NOT_REACHED();
6612
6613 case OFPC_FRAG_NX_MATCH:
6614 /* Nothing to do. */
6615 break;
f0fd1a17
PS
6616
6617 case OFPC_INVALID_TTL_TO_CONTROLLER:
6618 NOT_REACHED();
7257b535
BP
6619 }
6620 }
6621
ffaef958
BP
6622 in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
6623 special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
6a7e895f
BP
6624 if (special) {
6625 ctx->slow |= special;
abe529af 6626 } else {
6a6455e5 6627 static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
6a6455e5 6628 ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
ee382d89 6629 uint32_t local_odp_port;
6a6455e5 6630
6ff686f2 6631 add_sflow_action(ctx);
ffaef958
BP
6632
6633 if (!in_port || may_receive(in_port, ctx)) {
6634 do_xlate_actions(ofpacts, ofpacts_len, ctx);
6635
6636 /* We've let OFPP_NORMAL and the learning action look at the
6637 * packet, so drop it now if forwarding is disabled. */
6638 if (in_port && !stp_forward_in_state(in_port->stp_state)) {
6639 ofpbuf_clear(ctx->odp_actions);
6640 add_sflow_action(ctx);
6641 }
6642 }
abe529af 6643
43d50bc8
BP
6644 if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
6645 if (!hit_resubmit_limit) {
6646 /* We didn't record the original flow. Make sure we do from
6647 * now on. */
6648 hit_resubmit_limit = true;
6649 } else if (!VLOG_DROP_ERR(&trace_rl)) {
6650 struct ds ds = DS_EMPTY_INITIALIZER;
6651
9ba85077 6652 ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
43d50bc8
BP
6653 initial_tci, &ds);
6654 VLOG_ERR("Trace triggered by excessive resubmit "
6655 "recursion:\n%s", ds_cstr(&ds));
6656 ds_destroy(&ds);
6657 }
6a6455e5
EJ
6658 }
6659
ee382d89 6660 local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
b6848f13 6661 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
ee382d89 6662 local_odp_port,
b6848f13
BP
6663 ctx->odp_actions->data,
6664 ctx->odp_actions->size)) {
6a7e895f 6665 ctx->slow |= SLOW_IN_BAND;
b6848f13
BP
6666 if (ctx->packet
6667 && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
6668 ctx->packet)) {
5e48dc2b 6669 compose_output_action(ctx, OFPP_LOCAL);
b6848f13
BP
6670 }
6671 }
ccb7c863 6672 if (ctx->ofproto->has_mirrors) {
9ba85077 6673 add_mirror_actions(ctx, &orig_flow);
ccb7c863 6674 }
a7c4eaf6 6675 fix_sflow_action(ctx);
abe529af 6676 }
050ac423
BP
6677}
6678
f25d0cf3
BP
6679/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
6680 * into datapath actions, using 'ctx', and discards the datapath actions. */
050ac423
BP
6681static void
6682xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
f25d0cf3
BP
6683 const struct ofpact *ofpacts,
6684 size_t ofpacts_len)
050ac423
BP
6685{
6686 uint64_t odp_actions_stub[1024 / 8];
6687 struct ofpbuf odp_actions;
abe529af 6688
050ac423 6689 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
f25d0cf3 6690 xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
050ac423 6691 ofpbuf_uninit(&odp_actions);
abe529af 6692}
479df176
BP
6693
6694static void
6695xlate_report(struct action_xlate_ctx *ctx, const char *s)
6696{
6697 if (ctx->report_hook) {
6698 ctx->report_hook(ctx, s);
6699 }
6700}
abe529af
BP
6701\f
6702/* OFPP_NORMAL implementation. */
6703
abe529af
BP
6704static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
6705
ecac4ebf
BP
6706/* Given 'vid', the VID obtained from the 802.1Q header that was received as
6707 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
6708 * the bundle on which the packet was received, returns the VLAN to which the
6709 * packet belongs.
6710 *
6711 * Both 'vid' and the return value are in the range 0...4095. */
6712static uint16_t
6713input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
6714{
6715 switch (in_bundle->vlan_mode) {
6716 case PORT_VLAN_ACCESS:
6717 return in_bundle->vlan;
6719
6720 case PORT_VLAN_TRUNK:
6721 return vid;
6722
6723 case PORT_VLAN_NATIVE_UNTAGGED:
6724 case PORT_VLAN_NATIVE_TAGGED:
6725 return vid ? vid : in_bundle->vlan;
6726
6727 default:
6728 NOT_REACHED();
6729 }
6730}
6731
5da5ec37
BP
6732/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
6733 * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs
6734 * a warning.
6735 *
6736 * 'vid' should be the VID obtained from the 802.1Q header that was received as
6737 * part of a packet (specify 0 if there was no 802.1Q header), in the range
6738 * 0...4095. */
6739static bool
6740input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
6741{
33158a18
JP
6742 /* Allow any VID on the OFPP_NONE port. */
6743 if (in_bundle == &ofpp_none_bundle) {
6744 return true;
6745 }
6746
5da5ec37
BP
6747 switch (in_bundle->vlan_mode) {
6748 case PORT_VLAN_ACCESS:
6749 if (vid) {
6750 if (warn) {
6751 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6752 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
6753 "packet received on port %s configured as VLAN "
6754 "%"PRIu16" access port",
6755 in_bundle->ofproto->up.name, vid,
6756 in_bundle->name, in_bundle->vlan);
6757 }
6758 return false;
6759 }
6760 return true;
6761
6762 case PORT_VLAN_NATIVE_UNTAGGED:
6763 case PORT_VLAN_NATIVE_TAGGED:
6764 if (!vid) {
6765 /* Port must always carry its native VLAN. */
6766 return true;
6767 }
6768 /* Fall through. */
6769 case PORT_VLAN_TRUNK:
6770 if (!ofbundle_includes_vlan(in_bundle, vid)) {
6771 if (warn) {
6772 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6773 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
6774 "received on port %s not configured for trunking "
6775 "VLAN %"PRIu16,
6776 in_bundle->ofproto->up.name, vid,
6777 in_bundle->name, vid);
6778 }
6779 return false;
6780 }
6781 return true;
6782
6783 default:
6784 NOT_REACHED();
6785 }
6787}
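
/* Example (hypothetical values): for a PORT_VLAN_ACCESS bundle on VLAN 10,
 * input_vid_is_valid(0, bundle, warn) returns true, because untagged traffic
 * belongs to the access VLAN, while input_vid_is_valid(20, bundle, true)
 * logs the rate-limited warning above and returns false. */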
6788
ecac4ebf
BP
6789/* Given 'vlan', the VLAN that a packet belongs to, and
6790 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
6791 * that should be included in the 802.1Q header. (If the return value is 0,
6792 * then the 802.1Q header should only be included in the packet if there is a
6793 * nonzero PCP.)
6794 *
6795 * Both 'vlan' and the return value are in the range 0...4095. */
6796static uint16_t
6797output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
6798{
6799 switch (out_bundle->vlan_mode) {
6800 case PORT_VLAN_ACCESS:
6801 return 0;
6802
6803 case PORT_VLAN_TRUNK:
6804 case PORT_VLAN_NATIVE_TAGGED:
6805 return vlan;
6806
6807 case PORT_VLAN_NATIVE_UNTAGGED:
6808 return vlan == out_bundle->vlan ? 0 : vlan;
6809
6810 default:
6811 NOT_REACHED();
6812 }
6813}
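
/* Example (hypothetical values): input_vid_to_vlan() and
 * output_vlan_to_vid() are inverses on a single bundle.  For a
 * PORT_VLAN_NATIVE_UNTAGGED bundle whose native VLAN is 10:
 *
 *     input_vid_to_vlan(bundle, 0)    => 10  (untagged maps to native VLAN)
 *     output_vlan_to_vid(bundle, 10)  => 0   (native VLAN leaves untagged)
 *     input_vid_to_vlan(bundle, 20)   => 20  (tagged traffic passes through)
 *     output_vlan_to_vid(bundle, 20)  => 20
 */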
6814
395e68ce
BP
6815static void
6816output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
6817 uint16_t vlan)
abe529af 6818{
395e68ce
BP
6819 struct ofport_dpif *port;
6820 uint16_t vid;
81b1afb1 6821 ovs_be16 tci, old_tci;
ecac4ebf 6822
395e68ce
BP
6823 vid = output_vlan_to_vid(out_bundle, vlan);
6824 if (!out_bundle->bond) {
6825 port = ofbundle_get_a_port(out_bundle);
6826 } else {
6827 port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
6828 vid, &ctx->tags);
6829 if (!port) {
6830 /* No slaves enabled, so drop packet. */
6831 return;
6832 }
6833 }
abe529af 6834
81b1afb1 6835 old_tci = ctx->flow.vlan_tci;
5e9ceccd
BP
6836 tci = htons(vid);
6837 if (tci || out_bundle->use_priority_tags) {
6838 tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
6839 if (tci) {
6840 tci |= htons(VLAN_CFI);
6841 }
395e68ce 6842 }
81b1afb1 6843 ctx->flow.vlan_tci = tci;
395e68ce 6844
5e48dc2b 6845 compose_output_action(ctx, port->up.ofp_port);
81b1afb1 6846 ctx->flow.vlan_tci = old_tci;
abe529af
BP
6847}
6848
6849static int
6850mirror_mask_ffs(mirror_mask_t mask)
6851{
6852 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
6853 return ffs(mask);
6854}
6855
abe529af
BP
6856static bool
6857ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
6858{
ecac4ebf 6859 return (bundle->vlan_mode != PORT_VLAN_ACCESS
fc3d7408 6860 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
abe529af
BP
6861}
6862
6863static bool
6864ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
6865{
6866 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
6867}
6868
6869/* Returns an arbitrary interface within 'bundle'. */
6870static struct ofport_dpif *
6871ofbundle_get_a_port(const struct ofbundle *bundle)
6872{
6873 return CONTAINER_OF(list_front(&bundle->ports),
6874 struct ofport_dpif, bundle_node);
6875}
6876
abe529af
BP
6877static bool
6878vlan_is_mirrored(const struct ofmirror *m, int vlan)
6879{
fc3d7408 6880 return !m->vlans || bitmap_is_set(m->vlans, vlan);
abe529af
BP
6881}
6882
6883static void
c06bba01 6884add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
abe529af
BP
6885{
6886 struct ofproto_dpif *ofproto = ctx->ofproto;
6887 mirror_mask_t mirrors;
c06bba01
JP
6888 struct ofbundle *in_bundle;
6889 uint16_t vlan;
6890 uint16_t vid;
6891 const struct nlattr *a;
6892 size_t left;
6893
3581c12c 6894 in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
70c2fd56 6895 ctx->packet != NULL, NULL);
3581c12c 6896 if (!in_bundle) {
c06bba01
JP
6897 return;
6898 }
c06bba01
JP
6899 mirrors = in_bundle->src_mirrors;
6900
6901 /* Drop frames on bundles reserved for mirroring. */
6902 if (in_bundle->mirror_out) {
6903 if (ctx->packet != NULL) {
6904 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6905 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
6906 "%s, which is reserved exclusively for mirroring",
6907 ctx->ofproto->up.name, in_bundle->name);
6908 }
6909 return;
6910 }
6911
6912 /* Check VLAN. */
6913 vid = vlan_tci_to_vid(orig_flow->vlan_tci);
6914 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
6915 return;
6916 }
6917 vlan = input_vid_to_vlan(in_bundle, vid);
6918
6919 /* Look at the output ports to check for destination selections. */
6920
6921 NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
6922 ctx->odp_actions->size) {
6923 enum ovs_action_attr type = nl_attr_type(a);
6924 struct ofport_dpif *ofport;
6925
6926 if (type != OVS_ACTION_ATTR_OUTPUT) {
6927 continue;
6928 }
6929
6930 ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
521472bc
BP
6931 if (ofport && ofport->bundle) {
6932 mirrors |= ofport->bundle->dst_mirrors;
6933 }
c06bba01 6934 }
abe529af
BP
6935
6936 if (!mirrors) {
6937 return;
6938 }
6939
c06bba01
JP
6940 /* Restore the original packet before adding the mirror actions. */
6941 ctx->flow = *orig_flow;
6942
9ba15e2a
BP
6943 while (mirrors) {
6944 struct ofmirror *m;
9ba15e2a
BP
6945
6946 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
6947
6948 if (!vlan_is_mirrored(m, vlan)) {
8472a3ce 6949 mirrors = zero_rightmost_1bit(mirrors);
9ba15e2a
BP
6950 continue;
6951 }
6952
6953 mirrors &= ~m->dup_mirrors;
9d24de3b 6954 ctx->mirrors |= m->dup_mirrors;
9ba15e2a 6955 if (m->out) {
395e68ce 6956 output_normal(ctx, m->out, vlan);
614ec445
EJ
6957 } else if (vlan != m->out_vlan
6958 && !eth_addr_is_reserved(orig_flow->dl_dst)) {
9ba15e2a
BP
6959 struct ofbundle *bundle;
6960
6961 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
6962 if (ofbundle_includes_vlan(bundle, m->out_vlan)
395e68ce
BP
6963 && !bundle->mirror_out) {
6964 output_normal(ctx, bundle, m->out_vlan);
abe529af
BP
6965 }
6966 }
6967 }
abe529af
BP
6968 }
6969}
6970
9d24de3b
JP
6971static void
6972update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
6973 uint64_t packets, uint64_t bytes)
6974{
6975 if (!mirrors) {
6976 return;
6977 }
6978
8472a3ce 6979 for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
9d24de3b
JP
6980 struct ofmirror *m;
6981
6982 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
6983
6984 if (!m) {
6985 /* In normal circumstances 'm' will not be NULL. However,
6986 * if mirrors are reconfigured, we can temporarily get out
6987 * of sync in facet_revalidate(). We could "correct" the
6988 * mirror list before reaching here, but doing that would
6989 * not properly account the traffic stats we've currently
6990 * accumulated for previous mirror configuration. */
6991 continue;
6992 }
6993
6994 m->packet_count += packets;
6995 m->byte_count += bytes;
6996 }
6997}
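
/* Example (hypothetical mask): both loops above walk 'mirrors' one set bit
 * at a time.  With mirrors == 0x6 (binary 0110), mirror_mask_ffs() returns
 * 2, so ofproto->mirrors[1] is visited first; zero_rightmost_1bit(0x6) then
 * yields 0x4, visiting ofproto->mirrors[2]; clearing that bit yields 0 and
 * the loop ends. */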
6998
abe529af
BP
6999/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
7000 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
7001 * indicate this; newer upstream kernels use gratuitous ARP requests. */
7002static bool
7003is_gratuitous_arp(const struct flow *flow)
7004{
7005 return (flow->dl_type == htons(ETH_TYPE_ARP)
7006 && eth_addr_is_broadcast(flow->dl_dst)
7007 && (flow->nw_proto == ARP_OP_REPLY
7008 || (flow->nw_proto == ARP_OP_REQUEST
7009 && flow->nw_src == flow->nw_dst)));
7010}
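
/* For example (a sketch): an ARP request that a VM broadcasts to announce
 * its own address carries its own IP in both the sender and target fields,
 * so flow->nw_src == flow->nw_dst and the second disjunct above matches;
 * a Citrix-style gratuitous ARP reply matches the first disjunct. */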
7011
7012static void
7013update_learning_table(struct ofproto_dpif *ofproto,
7014 const struct flow *flow, int vlan,
7015 struct ofbundle *in_bundle)
7016{
7017 struct mac_entry *mac;
7018
33158a18
JP
7019 /* Don't learn the OFPP_NONE port. */
7020 if (in_bundle == &ofpp_none_bundle) {
7021 return;
7022 }
7023
abe529af
BP
7024 if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
7025 return;
7026 }
7027
7028 mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
7029 if (is_gratuitous_arp(flow)) {
7030 /* We don't want to learn from gratuitous ARP packets that are
7031 * reflected back over bond slaves so we lock the learning table. */
7032 if (!in_bundle->bond) {
7033 mac_entry_set_grat_arp_lock(mac);
7034 } else if (mac_entry_is_grat_arp_locked(mac)) {
7035 return;
7036 }
7037 }
7038
7039 if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
7040 /* The log messages here could actually be useful in debugging,
7041 * so keep the rate limit relatively high. */
7042 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
7043 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
7044 "on port %s in VLAN %d",
7045 ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
7046 in_bundle->name, vlan);
7047
7048 mac->port.p = in_bundle;
2cc3c58e 7049 tag_set_add(&ofproto->backer->revalidate_set,
abe529af
BP
7050 mac_learning_changed(ofproto->ml, mac));
7051 }
7052}
7053
3581c12c 7054static struct ofbundle *
4acbc98d
SH
7055lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
7056 bool warn, struct ofport_dpif **in_ofportp)
395e68ce
BP
7057{
7058 struct ofport_dpif *ofport;
7059
7060 /* Find the port and bundle for the received packet. */
7061 ofport = get_ofp_port(ofproto, in_port);
70c2fd56
BP
7062 if (in_ofportp) {
7063 *in_ofportp = ofport;
7064 }
395e68ce 7065 if (ofport && ofport->bundle) {
3581c12c 7066 return ofport->bundle;
395e68ce
BP
7067 }
7068
70c2fd56
BP
7069 /* Special-case OFPP_NONE, which a controller may use as the ingress
7070 * port for traffic that it is sourcing. */
7071 if (in_port == OFPP_NONE) {
7072 return &ofpp_none_bundle;
7073 }
7074
395e68ce
BP
7075 /* Odd. A few possible reasons here:
7076 *
7077 * - We deleted a port but there are still a few packets queued up
7078 * from it.
7079 *
7080 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
7081 * we don't know about.
7082 *
7083 * - The ofproto client didn't configure the port as part of a bundle.
6b803ddc
EJ
7084 * This is particularly likely to happen if a packet was received on the
7085 * port after it was created, but before the client had a chance to
7086 * configure its bundle.
395e68ce
BP
7087 */
7088 if (warn) {
7089 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7090
7091 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
7092 "port %"PRIu16, ofproto->up.name, in_port);
7093 }
7094 return NULL;
7095}
7096
5da5ec37 7097/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
abe529af
BP
7098 * dropped. Returns true if they may be forwarded, false if they should be
7099 * dropped.
7100 *
395e68ce
BP
7101 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 7102 * 'in_port' must be part of a bundle (i.e. in_port->bundle must be nonnull).
abe529af 7103 *
395e68ce
BP
7104 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
7105 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
7106 * checked by input_vid_is_valid().
abe529af
BP
7107 *
 7108 * May also add tags to 'ctx->tags', although the current implementation only
 7109 * does so in one special case.
7109 * so in one special case.
7110 */
7111static bool
479df176
BP
7112is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
7113 uint16_t vlan)
abe529af 7114{
479df176
BP
7115 struct ofproto_dpif *ofproto = ctx->ofproto;
7116 struct flow *flow = &ctx->flow;
395e68ce 7117 struct ofbundle *in_bundle = in_port->bundle;
abe529af 7118
395e68ce
BP
7119 /* Drop frames for reserved multicast addresses
7120 * only if forward_bpdu option is absent. */
614ec445 7121 if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
479df176 7122 xlate_report(ctx, "packet has reserved destination MAC, dropping");
abe529af
BP
7123 return false;
7124 }
7125
abe529af
BP
7126 if (in_bundle->bond) {
7127 struct mac_entry *mac;
7128
7129 switch (bond_check_admissibility(in_bundle->bond, in_port,
479df176 7130 flow->dl_dst, &ctx->tags)) {
abe529af
BP
7131 case BV_ACCEPT:
7132 break;
7133
7134 case BV_DROP:
479df176 7135 xlate_report(ctx, "bonding refused admissibility, dropping");
abe529af
BP
7136 return false;
7137
7138 case BV_DROP_IF_MOVED:
7139 mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
7140 if (mac && mac->port.p != in_bundle &&
7141 (!is_gratuitous_arp(flow)
7142 || mac_entry_is_grat_arp_locked(mac))) {
479df176
BP
7143 xlate_report(ctx, "SLB bond thinks this packet looped back, "
7144 "dropping");
abe529af
BP
7145 return false;
7146 }
7147 break;
7148 }
7149 }
7150
7151 return true;
7152}
7153
4cd78906 7154static void
abe529af
BP
7155xlate_normal(struct action_xlate_ctx *ctx)
7156{
395e68ce 7157 struct ofport_dpif *in_port;
abe529af 7158 struct ofbundle *in_bundle;
abe529af 7159 struct mac_entry *mac;
395e68ce
BP
7160 uint16_t vlan;
7161 uint16_t vid;
abe529af 7162
75a75043
BP
7163 ctx->has_normal = true;
7164
3581c12c 7165 in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
70c2fd56 7166 ctx->packet != NULL, &in_port);
3581c12c 7167 if (!in_bundle) {
479df176 7168 xlate_report(ctx, "no input bundle, dropping");
395e68ce
BP
7169 return;
7170 }
3581c12c 7171
395e68ce
BP
7172 /* Drop malformed frames. */
7173 if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
7174 !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
7175 if (ctx->packet != NULL) {
7176 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7177 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
7178 "VLAN tag received on port %s",
7179 ctx->ofproto->up.name, in_bundle->name);
7180 }
479df176 7181 xlate_report(ctx, "partial VLAN tag, dropping");
395e68ce
BP
7182 return;
7183 }
7184
7185 /* Drop frames on bundles reserved for mirroring. */
7186 if (in_bundle->mirror_out) {
7187 if (ctx->packet != NULL) {
7188 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7189 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
7190 "%s, which is reserved exclusively for mirroring",
7191 ctx->ofproto->up.name, in_bundle->name);
7192 }
479df176 7193 xlate_report(ctx, "input port is mirror output port, dropping");
395e68ce
BP
7194 return;
7195 }
7196
7197 /* Check VLAN. */
7198 vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
7199 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
479df176 7200 xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
395e68ce
BP
7201 return;
7202 }
7203 vlan = input_vid_to_vlan(in_bundle, vid);
7204
7205 /* Check other admissibility requirements. */
479df176 7206 if (in_port && !is_admissible(ctx, in_port, vlan)) {
395e68ce 7207 return;
abe529af
BP
7208 }
7209
75a75043 7210 /* Learn source MAC. */
3de9590b 7211 if (ctx->may_learn) {
abe529af
BP
7212 update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
7213 }
7214
7215 /* Determine output bundle. */
7216 mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
7217 &ctx->tags);
7218 if (mac) {
c06bba01 7219 if (mac->port.p != in_bundle) {
479df176 7220 xlate_report(ctx, "forwarding to learned port");
c06bba01 7221 output_normal(ctx, mac->port.p, vlan);
479df176
BP
7222 } else {
7223 xlate_report(ctx, "learned port is input port, dropping");
c06bba01 7224 }
abe529af 7225 } else {
c06bba01 7226 struct ofbundle *bundle;
abe529af 7227
479df176 7228 xlate_report(ctx, "no learned MAC for destination, flooding");
c06bba01
JP
7229 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
7230 if (bundle != in_bundle
7231 && ofbundle_includes_vlan(bundle, vlan)
7232 && bundle->floodable
7233 && !bundle->mirror_out) {
7234 output_normal(ctx, bundle, vlan);
7235 }
7236 }
7237 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af 7238 }
abe529af
BP
7239}
7240\f
54a9cbc9
BP
7241/* Optimized flow revalidation.
7242 *
7243 * It's a difficult problem, in general, to tell which facets need to have
7244 * their actions recalculated whenever the OpenFlow flow table changes. We
7245 * don't try to solve that general problem: for most kinds of OpenFlow flow
7246 * table changes, we recalculate the actions for every facet. This is
7247 * relatively expensive, but it's good enough if the OpenFlow flow table
7248 * doesn't change very often.
7249 *
7250 * However, we can expect one particular kind of OpenFlow flow table change to
7251 * happen frequently: changes caused by MAC learning. To avoid wasting a lot
7252 * of CPU on revalidating every facet whenever MAC learning modifies the flow
7253 * table, we add a special case that applies to flow tables in which every rule
7254 * has the same form (that is, the same wildcards), except that the table is
7255 * also allowed to have a single "catch-all" flow that matches all packets. We
7256 * optimize this case by tagging all of the facets that resubmit into the table
7257 * and invalidating the same tag whenever a flow changes in that table. The
7258 * end result is that we revalidate just the facets that need it (and sometimes
7259 * a few more, but not all of the facets or even all of the facets that
7260 * resubmit to the table modified by MAC learning). */
7261
5cb7a798 7262/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
54a9cbc9 7263 * into an OpenFlow table with the given 'basis'. */
822d9414 7264static tag_type
5cb7a798 7265rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
54a9cbc9
BP
7266 uint32_t secret)
7267{
5cb7a798 7268 if (minimask_is_catchall(mask)) {
54a9cbc9
BP
7269 return 0;
7270 } else {
5cb7a798
BP
7271 uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
7272 return tag_create_deterministic(hash);
54a9cbc9
BP
7273 }
7274}
7275
7276/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
7277 * taggability of that table.
7278 *
7279 * This function must be called after *each* change to a flow table. If you
7280 * skip calling it on some changes then the pointer comparisons at the end can
7281 * be invalid if you get unlucky. For example, if a flow removal causes a
7282 * cls_table to be destroyed and then a flow insertion causes a cls_table with
7283 * different wildcards to be created with the same address, then this function
7284 * will incorrectly skip revalidation. */
7285static void
7286table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
7287{
7288 struct table_dpif *table = &ofproto->tables[table_id];
d0918789 7289 const struct oftable *oftable = &ofproto->up.tables[table_id];
54a9cbc9
BP
7290 struct cls_table *catchall, *other;
7291 struct cls_table *t;
7292
7293 catchall = other = NULL;
7294
d0918789 7295 switch (hmap_count(&oftable->cls.tables)) {
54a9cbc9
BP
7296 case 0:
7297 /* We could tag this OpenFlow table but it would make the logic a
7298 * little harder and it's a corner case that doesn't seem worth it
7299 * yet. */
7300 break;
7301
7302 case 1:
7303 case 2:
d0918789 7304 HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
54a9cbc9
BP
7305 if (cls_table_is_catchall(t)) {
7306 catchall = t;
7307 } else if (!other) {
7308 other = t;
7309 } else {
7310 /* Indicate that we can't tag this by setting both tables to
7311 * NULL. (We know that 'catchall' is already NULL.) */
7312 other = NULL;
7313 }
7314 }
7315 break;
7316
7317 default:
7318 /* Can't tag this table. */
7319 break;
7320 }
7321
7322 if (table->catchall_table != catchall || table->other_table != other) {
7323 table->catchall_table = catchall;
7324 table->other_table = other;
2cc3c58e 7325 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7326 }
7327}
7328
7329/* Given 'rule' that has changed in some way (either it is a rule being
7330 * inserted, a rule being deleted, or a rule whose actions are being
7331 * modified), marks facets for revalidation to ensure that packets will be
7332 * forwarded correctly according to the new state of the flow table.
7333 *
7334 * This function must be called after *each* change to a flow table. See
7335 * the comment on table_update_taggable() for more information. */
7336static void
7337rule_invalidate(const struct rule_dpif *rule)
7338{
7339 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7340
7341 table_update_taggable(ofproto, rule->up.table_id);
7342
2cc3c58e 7343 if (!ofproto->backer->need_revalidate) {
54a9cbc9
BP
7344 struct table_dpif *table = &ofproto->tables[rule->up.table_id];
7345
7346 if (table->other_table && rule->tag) {
2cc3c58e 7347 tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
54a9cbc9 7348 } else {
2cc3c58e 7349 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7350 }
7351 }
7352}
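
/* Sketch of how the two sides fit together (a summary under the scheme
 * described above, not new functionality): when translation resubmits into
 * a table whose 'other_table' is set, it tags the facet with roughly:
 *
 *     struct table_dpif *table = &ofproto->tables[table_id];
 *     if (table->other_table) {
 *         ctx->tags |= rule->tag
 *             ? rule->tag
 *             : rule_calculate_tag(&ctx->flow, &table->other_table->mask,
 *                                  table->basis);
 *     }
 *
 * A later rule_invalidate() on that table then wakes exactly the facets
 * carrying that tag via the revalidate_set, instead of setting
 * need_revalidate and revalidating every facet. */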
7353\f
abe529af 7354static bool
7257b535
BP
7355set_frag_handling(struct ofproto *ofproto_,
7356 enum ofp_config_flags frag_handling)
abe529af
BP
7357{
7358 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7257b535 7359 if (frag_handling != OFPC_FRAG_REASM) {
2cc3c58e 7360 ofproto->backer->need_revalidate = REV_RECONFIGURE;
7257b535
BP
7361 return true;
7362 } else {
7363 return false;
7364 }
abe529af
BP
7365}
7366
90bf1e07 7367static enum ofperr
abe529af
BP
7368packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
7369 const struct flow *flow,
f25d0cf3 7370 const struct ofpact *ofpacts, size_t ofpacts_len)
abe529af
BP
7371{
7372 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
548de4dd
BP
7373 struct odputil_keybuf keybuf;
7374 struct dpif_flow_stats stats;
abe529af 7375
548de4dd 7376 struct ofpbuf key;
112bc5f4 7377
548de4dd
BP
7378 struct action_xlate_ctx ctx;
7379 uint64_t odp_actions_stub[1024 / 8];
7380 struct ofpbuf odp_actions;
80e5eed9 7381
548de4dd 7382 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
7383 odp_flow_key_from_flow(&key, flow,
7384 ofp_port_to_odp_port(ofproto, flow->in_port));
050ac423 7385
548de4dd 7386 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
abe529af 7387
548de4dd
BP
7388 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
7389 packet_get_tcp_flags(packet, flow), packet);
7390 ctx.resubmit_stats = &stats;
2284188b 7391
548de4dd
BP
7392 ofpbuf_use_stub(&odp_actions,
7393 odp_actions_stub, sizeof odp_actions_stub);
7394 xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
acf60855 7395 dpif_execute(ofproto->backer->dpif, key.data, key.size,
548de4dd
BP
7396 odp_actions.data, odp_actions.size, packet);
7397 ofpbuf_uninit(&odp_actions);
2284188b 7398
548de4dd 7399 return 0;
abe529af 7400}
6fca1ffb
BP
7401\f
7402/* NetFlow. */
7403
7404static int
7405set_netflow(struct ofproto *ofproto_,
7406 const struct netflow_options *netflow_options)
7407{
7408 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7409
7410 if (netflow_options) {
7411 if (!ofproto->netflow) {
7412 ofproto->netflow = netflow_create();
7413 }
7414 return netflow_set_options(ofproto->netflow, netflow_options);
7415 } else {
7416 netflow_destroy(ofproto->netflow);
7417 ofproto->netflow = NULL;
7418 return 0;
7419 }
7420}
abe529af
BP
7421
7422static void
7423get_netflow_ids(const struct ofproto *ofproto_,
7424 uint8_t *engine_type, uint8_t *engine_id)
7425{
7426 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7427
acf60855 7428 dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
abe529af 7429}
6fca1ffb
BP
7430
7431static void
7432send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
7433{
7434 if (!facet_is_controller_flow(facet) &&
7435 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
b0f7b9b5 7436 struct subfacet *subfacet;
6fca1ffb
BP
7437 struct ofexpired expired;
7438
b0f7b9b5 7439 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 7440 if (subfacet->path == SF_FAST_PATH) {
b0f7b9b5 7441 struct dpif_flow_stats stats;
6fca1ffb 7442
6a7e895f 7443 subfacet_reinstall(subfacet, &stats);
15baa734 7444 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 7445 }
6fca1ffb
BP
7446 }
7447
7448 expired.flow = facet->flow;
7449 expired.packet_count = facet->packet_count;
7450 expired.byte_count = facet->byte_count;
7451 expired.used = facet->used;
7452 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
7453 }
7454}
7455
7456static void
7457send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
7458{
7459 struct facet *facet;
7460
7461 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
7462 send_active_timeout(ofproto, facet);
7463 }
7464}
abe529af
BP
7465\f
7466static struct ofproto_dpif *
7467ofproto_dpif_lookup(const char *name)
7468{
b44a10b7
BP
7469 struct ofproto_dpif *ofproto;
7470
7471 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
7472 hash_string(name, 0), &all_ofproto_dpifs) {
7473 if (!strcmp(ofproto->up.name, name)) {
7474 return ofproto;
7475 }
7476 }
7477 return NULL;
abe529af
BP
7478}
7479
f0a3aa2e 7480static void
96e466a3 7481ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
0e15264f 7482 const char *argv[], void *aux OVS_UNUSED)
f0a3aa2e 7483{
490df1ef 7484 struct ofproto_dpif *ofproto;
f0a3aa2e 7485
96e466a3
EJ
7486 if (argc > 1) {
7487 ofproto = ofproto_dpif_lookup(argv[1]);
7488 if (!ofproto) {
bde9f75d 7489 unixctl_command_reply_error(conn, "no such bridge");
96e466a3
EJ
7490 return;
7491 }
2cc3c58e 7492 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3
EJ
7493 } else {
7494 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2cc3c58e 7495 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3 7496 }
f0a3aa2e 7497 }
f0a3aa2e 7498
bde9f75d 7499 unixctl_command_reply(conn, "table successfully flushed");
f0a3aa2e
AA
7500}
7501
abe529af 7502static void
0e15264f
BP
7503ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
7504 const char *argv[], void *aux OVS_UNUSED)
abe529af
BP
7505{
7506 struct ds ds = DS_EMPTY_INITIALIZER;
7507 const struct ofproto_dpif *ofproto;
7508 const struct mac_entry *e;
7509
0e15264f 7510 ofproto = ofproto_dpif_lookup(argv[1]);
abe529af 7511 if (!ofproto) {
bde9f75d 7512 unixctl_command_reply_error(conn, "no such bridge");
abe529af
BP
7513 return;
7514 }
7515
7516 ds_put_cstr(&ds, " port VLAN MAC Age\n");
7517 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
7518 struct ofbundle *bundle = e->port.p;
7519 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
7520 ofbundle_get_a_port(bundle)->odp_port,
e764773c
BP
7521 e->vlan, ETH_ADDR_ARGS(e->mac),
7522 mac_entry_age(ofproto->ml, e));
abe529af 7523 }
bde9f75d 7524 unixctl_command_reply(conn, ds_cstr(&ds));
abe529af
BP
7525 ds_destroy(&ds);
7526}
7527
6a6455e5 7528struct trace_ctx {
abe529af
BP
7529 struct action_xlate_ctx ctx;
7530 struct flow flow;
7531 struct ds *result;
7532};
7533
7534static void
29901626
BP
7535trace_format_rule(struct ds *result, uint8_t table_id, int level,
7536 const struct rule_dpif *rule)
abe529af
BP
7537{
7538 ds_put_char_multiple(result, '\t', level);
7539 if (!rule) {
7540 ds_put_cstr(result, "No match\n");
7541 return;
7542 }
7543
29901626
BP
7544 ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
7545 table_id, ntohll(rule->up.flow_cookie));
79feb7df 7546 cls_rule_format(&rule->up.cr, result);
abe529af
BP
7547 ds_put_char(result, '\n');
7548
7549 ds_put_char_multiple(result, '\t', level);
7550 ds_put_cstr(result, "OpenFlow ");
f25d0cf3 7551 ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
abe529af
BP
7552 ds_put_char(result, '\n');
7553}
7554
7555static void
7556trace_format_flow(struct ds *result, int level, const char *title,
6a6455e5 7557 struct trace_ctx *trace)
abe529af
BP
7558{
7559 ds_put_char_multiple(result, '\t', level);
7560 ds_put_format(result, "%s: ", title);
7561 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
7562 ds_put_cstr(result, "unchanged");
7563 } else {
7564 flow_format(result, &trace->ctx.flow);
7565 trace->flow = trace->ctx.flow;
7566 }
7567 ds_put_char(result, '\n');
7568}
7569
eb9e1c26
EJ
7570static void
7571trace_format_regs(struct ds *result, int level, const char *title,
6a6455e5 7572 struct trace_ctx *trace)
eb9e1c26
EJ
7573{
7574 size_t i;
7575
7576 ds_put_char_multiple(result, '\t', level);
7577 ds_put_format(result, "%s:", title);
7578 for (i = 0; i < FLOW_N_REGS; i++) {
7579 ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
7580 }
7581 ds_put_char(result, '\n');
7582}
7583
1ed8d352
EJ
7584static void
7585trace_format_odp(struct ds *result, int level, const char *title,
6a6455e5 7586 struct trace_ctx *trace)
1ed8d352
EJ
7587{
7588 struct ofpbuf *odp_actions = trace->ctx.odp_actions;
7589
7590 ds_put_char_multiple(result, '\t', level);
7591 ds_put_format(result, "%s: ", title);
7592 format_odp_actions(result, odp_actions->data, odp_actions->size);
7593 ds_put_char(result, '\n');
7594}
7595
abe529af
BP
7596static void
7597trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
7598{
6a6455e5 7599 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
abe529af
BP
7600 struct ds *result = trace->result;
7601
7602 ds_put_char(result, '\n');
7603 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
eb9e1c26 7604 trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
1ed8d352 7605 trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
29901626 7606 trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
abe529af
BP
7607}
7608
479df176
BP
7609static void
7610trace_report(struct action_xlate_ctx *ctx, const char *s)
7611{
7612 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
7613 struct ds *result = trace->result;
7614
7615 ds_put_char_multiple(result, '\t', ctx->recurse);
7616 ds_put_cstr(result, s);
7617 ds_put_char(result, '\n');
7618}
7619
abe529af 7620static void
0e15264f 7621ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
abe529af
BP
7622 void *aux OVS_UNUSED)
7623{
0e15264f 7624 const char *dpname = argv[1];
abe529af 7625 struct ofproto_dpif *ofproto;
876b0e1c
BP
7626 struct ofpbuf odp_key;
7627 struct ofpbuf *packet;
e84173dc 7628 ovs_be16 initial_tci;
abe529af
BP
7629 struct ds result;
7630 struct flow flow;
abe529af
BP
7631 char *s;
7632
876b0e1c
BP
7633 packet = NULL;
7634 ofpbuf_init(&odp_key, 0);
abe529af
BP
7635 ds_init(&result);
7636
e84173dc
BP
7637 ofproto = ofproto_dpif_lookup(dpname);
7638 if (!ofproto) {
bde9f75d
EJ
7639 unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
7640 "for help)");
e84173dc
BP
7641 goto exit;
7642 }
0e15264f 7643 if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
8b3b8dd1 7644 /* ofproto/trace dpname flow [-generate] */
0e15264f
BP
7645 const char *flow_s = argv[2];
7646 const char *generate_s = argv[3];
876b0e1c 7647
31a19d69
BP
7648 /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
7649 * flow. We guess which type it is based on whether 'flow_s' contains
 7650 * an '(', since a datapath flow always contains '(' but an
7651 * OpenFlow-like flow should not (in fact it's allowed but I believe
7652 * that's not documented anywhere).
7653 *
7654 * An alternative would be to try to parse 'flow_s' both ways, but then
7655 * it would be tricky giving a sensible error message. After all, do
7656 * you just say "syntax error" or do you present both error messages?
7657 * Both choices seem lousy. */
7658 if (strchr(flow_s, '(')) {
7659 int error;
7660
7661 /* Convert string to datapath key. */
7662 ofpbuf_init(&odp_key, 0);
7663 error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
7664 if (error) {
7665 unixctl_command_reply_error(conn, "Bad flow syntax");
7666 goto exit;
7667 }
876b0e1c 7668
e09ee259
EJ
7669 /* XXX: Since we allow the user to specify an ofproto, it's
7670 * possible they will specify a different ofproto than the one the
 7671 * port actually belongs to. Ideally we should simply remove the
7672 * ability to specify the ofproto. */
7673 if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
7674 odp_key.size, &flow, NULL, NULL, NULL,
7675 &initial_tci)) {
31a19d69
BP
7676 unixctl_command_reply_error(conn, "Invalid flow");
7677 goto exit;
7678 }
7679 } else {
7680 char *error_s;
7681
7682 error_s = parse_ofp_exact_flow(&flow, argv[2]);
7683 if (error_s) {
7684 unixctl_command_reply_error(conn, error_s);
7685 free(error_s);
7686 goto exit;
7687 }
7688
7689 initial_tci = flow.vlan_tci;
876b0e1c 7690 }
8b3b8dd1
BP
7691
7692 /* Generate a packet, if requested. */
0e15264f 7693 if (generate_s) {
8b3b8dd1
BP
7694 packet = ofpbuf_new(0);
7695 flow_compose(packet, &flow);
7696 }
72e8bf28
AA
7697 } else if (argc == 7) {
7698 /* ofproto/trace dpname priority tun_id in_port mark packet */
0e15264f
BP
7699 const char *priority_s = argv[2];
7700 const char *tun_id_s = argv[3];
7701 const char *in_port_s = argv[4];
72e8bf28
AA
7702 const char *mark_s = argv[5];
7703 const char *packet_s = argv[6];
9b56fe13 7704 uint32_t in_port = atoi(in_port_s);
0e15264f
BP
7705 ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
7706 uint32_t priority = atoi(priority_s);
72e8bf28 7707 uint32_t mark = atoi(mark_s);
e22f1753 7708 const char *msg;
0e15264f 7709
e22f1753
BP
7710 msg = eth_from_hex(packet_s, &packet);
7711 if (msg) {
bde9f75d 7712 unixctl_command_reply_error(conn, msg);
876b0e1c
BP
7713 goto exit;
7714 }
7715
7716 ds_put_cstr(&result, "Packet: ");
c499c75d 7717 s = ofp_packet_to_string(packet->data, packet->size);
876b0e1c
BP
7718 ds_put_cstr(&result, s);
7719 free(s);
7720
72e8bf28 7721 flow_extract(packet, priority, mark, NULL, in_port, &flow);
296e07ac 7722 flow.tunnel.tun_id = tun_id;
e84173dc 7723 initial_tci = flow.vlan_tci;
876b0e1c 7724 } else {
bde9f75d 7725 unixctl_command_reply_error(conn, "Bad command syntax");
abe529af
BP
7726 goto exit;
7727 }
7728
6a6455e5
EJ
7729 ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
7730 unixctl_command_reply(conn, ds_cstr(&result));
7731
7732exit:
7733 ds_destroy(&result);
7734 ofpbuf_delete(packet);
7735 ofpbuf_uninit(&odp_key);
7736}
7737
7738static void
7739ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
7740 const struct ofpbuf *packet, ovs_be16 initial_tci,
7741 struct ds *ds)
7742{
7743 struct rule_dpif *rule;
7744
7745 ds_put_cstr(ds, "Flow: ");
7746 flow_format(ds, flow);
7747 ds_put_char(ds, '\n');
abe529af 7748
c57b2226
BP
7749 rule = rule_dpif_lookup(ofproto, flow);
7750
6a6455e5 7751 trace_format_rule(ds, 0, 0, rule);
c57b2226
BP
7752 if (rule == ofproto->miss_rule) {
7753 ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
7754 } else if (rule == ofproto->no_packet_in_rule) {
7755 ds_put_cstr(ds, "\nNo match, packets dropped because "
7756 "OFPPC_NO_PACKET_IN is set on in_port.\n");
7757 }
7758
abe529af 7759 if (rule) {
050ac423
BP
7760 uint64_t odp_actions_stub[1024 / 8];
7761 struct ofpbuf odp_actions;
7762
6a6455e5 7763 struct trace_ctx trace;
0e553d9c 7764 uint8_t tcp_flags;
abe529af 7765
6a6455e5
EJ
7766 tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
7767 trace.result = ds;
7768 trace.flow = *flow;
050ac423
BP
7769 ofpbuf_use_stub(&odp_actions,
7770 odp_actions_stub, sizeof odp_actions_stub);
6a6455e5 7771 action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
0e553d9c 7772 rule, tcp_flags, packet);
abe529af 7773 trace.ctx.resubmit_hook = trace_resubmit;
479df176 7774 trace.ctx.report_hook = trace_report;
f25d0cf3 7775 xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 7776 &odp_actions);
abe529af 7777
6a6455e5
EJ
7778 ds_put_char(ds, '\n');
7779 trace_format_flow(ds, 0, "Final flow", &trace);
7780 ds_put_cstr(ds, "Datapath actions: ");
050ac423
BP
7781 format_odp_actions(ds, odp_actions.data, odp_actions.size);
7782 ofpbuf_uninit(&odp_actions);
876b0e1c 7783
6a7e895f
BP
7784 if (trace.ctx.slow) {
7785 enum slow_path_reason slow;
7786
7787 ds_put_cstr(ds, "\nThis flow is handled by the userspace "
7788 "slow path because it:");
7789 for (slow = trace.ctx.slow; slow; ) {
7790 enum slow_path_reason bit = rightmost_1bit(slow);
7791
7792 switch (bit) {
7793 case SLOW_CFM:
7794 ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
7795 break;
7796 case SLOW_LACP:
7797 ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
7798 break;
7799 case SLOW_STP:
7800 ds_put_cstr(ds, "\n\t- Consists of STP packets.");
7801 break;
7802 case SLOW_IN_BAND:
7803 ds_put_cstr(ds, "\n\t- Needs in-band special case "
7804 "processing.");
7805 if (!packet) {
7806 ds_put_cstr(ds, "\n\t (The datapath actions are "
7807 "incomplete--for complete actions, "
7808 "please supply a packet.)");
7809 }
7810 break;
7811 case SLOW_CONTROLLER:
7812 ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
7813 "to the OpenFlow controller.");
7814 break;
7815 case SLOW_MATCH:
7816 ds_put_cstr(ds, "\n\t- Needs more specific matching "
7817 "than the datapath supports.");
7818 break;
7819 }
7820
7821 slow &= ~bit;
7822 }
7823
7824 if (slow & ~SLOW_MATCH) {
7825 ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
7826 "the special slow-path processing.");
876b0e1c
BP
7827 }
7828 }
abe529af 7829 }
abe529af
BP
7830}
7831
7ee20df1 7832static void
0e15264f
BP
7833ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7834 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7835{
7836 clogged = true;
bde9f75d 7837 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7838}
7839
7840static void
0e15264f
BP
7841ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7842 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7843{
7844 clogged = false;
bde9f75d 7845 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7846}
7847
6814e51f
BP
7848/* Runs a self-check of flow translations in 'ofproto'. Appends a message to
7849 * 'reply' describing the results. */
7850static void
7851ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
7852{
7853 struct facet *facet;
7854 int errors;
7855
7856 errors = 0;
7857 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
7858 if (!facet_check_consistency(facet)) {
7859 errors++;
7860 }
7861 }
7862 if (errors) {
2cc3c58e 7863 ofproto->backer->need_revalidate = REV_INCONSISTENCY;
6814e51f
BP
7864 }
7865
7866 if (errors) {
7867 ds_put_format(reply, "%s: self-check failed (%d errors)\n",
7868 ofproto->up.name, errors);
7869 } else {
7870 ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
7871 }
7872}
7873
7874static void
7875ofproto_dpif_self_check(struct unixctl_conn *conn,
7876 int argc, const char *argv[], void *aux OVS_UNUSED)
7877{
7878 struct ds reply = DS_EMPTY_INITIALIZER;
7879 struct ofproto_dpif *ofproto;
7880
7881 if (argc > 1) {
7882 ofproto = ofproto_dpif_lookup(argv[1]);
7883 if (!ofproto) {
bde9f75d
EJ
7884 unixctl_command_reply_error(conn, "Unknown ofproto (use "
7885 "ofproto/list for help)");
6814e51f
BP
7886 return;
7887 }
7888 ofproto_dpif_self_check__(ofproto, &reply);
7889 } else {
7890 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
7891 ofproto_dpif_self_check__(ofproto, &reply);
7892 }
7893 }
7894
bde9f75d 7895 unixctl_command_reply(conn, ds_cstr(&reply));
6814e51f
BP
7896 ds_destroy(&reply);
7897}
7898
27022416
JP
7899/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
7900 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
7901 * to destroy 'ofproto_shash' and free the returned value. */
7902static const struct shash_node **
7903get_ofprotos(struct shash *ofproto_shash)
7904{
7905 const struct ofproto_dpif *ofproto;
7906
7907 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
7908 char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
7909 shash_add_nocopy(ofproto_shash, name, ofproto);
7910 }
7911
7912 return shash_sort(ofproto_shash);
7913}
7914
7915static void
7916ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
7917 const char *argv[] OVS_UNUSED,
7918 void *aux OVS_UNUSED)
7919{
7920 struct ds ds = DS_EMPTY_INITIALIZER;
7921 struct shash ofproto_shash;
7922 const struct shash_node **sorted_ofprotos;
7923 int i;
7924
7925 shash_init(&ofproto_shash);
7926 sorted_ofprotos = get_ofprotos(&ofproto_shash);
7927 for (i = 0; i < shash_count(&ofproto_shash); i++) {
7928 const struct shash_node *node = sorted_ofprotos[i];
7929 ds_put_format(&ds, "%s\n", node->name);
7930 }
7931
7932 shash_destroy(&ofproto_shash);
7933 free(sorted_ofprotos);
7934
7935 unixctl_command_reply(conn, ds_cstr(&ds));
7936 ds_destroy(&ds);
7937}
7938
7939static void
7940show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
7941{
7942 struct dpif_dp_stats s;
7943 const struct shash_node **ports;
7944 int i;
7945
acf60855 7946 dpif_get_dp_stats(ofproto->backer->dpif, &s);
27022416 7947
acf60855
JP
7948 ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
7949 dpif_name(ofproto->backer->dpif));
7950 /* xxx It would be better to show bridge-specific stats instead
7951 * xxx of dp ones. */
27022416
JP
7952 ds_put_format(ds,
7953 "\tlookups: hit:%"PRIu64" missed:%"PRIu64" lost:%"PRIu64"\n",
7954 s.n_hit, s.n_missed, s.n_lost);
acf60855
JP
7955 ds_put_format(ds, "\tflows: %zu\n",
7956 hmap_count(&ofproto->subfacets));
27022416
JP
7957
7958 ports = shash_sort(&ofproto->up.port_by_name);
7959 for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
7960 const struct shash_node *node = ports[i];
7961 struct ofport *ofport = node->data;
7962 const char *name = netdev_get_name(ofport->netdev);
7963 const char *type = netdev_get_type(ofport->netdev);
0a740f48
EJ
7964 uint32_t odp_port;
7965
7966 ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
7967
7968 odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
7969 if (odp_port != OVSP_NONE) {
7970 ds_put_format(ds, "%"PRIu32":", odp_port);
7971 } else {
7972 ds_put_cstr(ds, "none:");
7973 }
27022416 7974
27022416
JP
7975 if (strcmp(type, "system")) {
7976 struct netdev *netdev;
7977 int error;
7978
7979 ds_put_format(ds, " (%s", type);
7980
7981 error = netdev_open(name, type, &netdev);
7982 if (!error) {
7983 struct smap config;
7984
7985 smap_init(&config);
7986 error = netdev_get_config(netdev, &config);
7987 if (!error) {
7988 const struct smap_node **nodes;
7989 size_t i;
7990
7991 nodes = smap_sort(&config);
7992 for (i = 0; i < smap_count(&config); i++) {
7993 const struct smap_node *node = nodes[i];
7994 ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
7995 node->key, node->value);
7996 }
7997 free(nodes);
7998 }
7999 smap_destroy(&config);
8000
8001 netdev_close(netdev);
8002 }
8003 ds_put_char(ds, ')');
8004 }
8005 ds_put_char(ds, '\n');
8006 }
8007 free(ports);
8008}
8009
8010static void
8011ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
8012 const char *argv[], void *aux OVS_UNUSED)
8013{
8014 struct ds ds = DS_EMPTY_INITIALIZER;
8015 const struct ofproto_dpif *ofproto;
8016
8017 if (argc > 1) {
8018 int i;
8019 for (i = 1; i < argc; i++) {
8020 ofproto = ofproto_dpif_lookup(argv[i]);
8021 if (!ofproto) {
8022 ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
8023 "for help)", argv[i]);
8024 unixctl_command_reply_error(conn, ds_cstr(&ds));
8025 return;
8026 }
8027 show_dp_format(ofproto, &ds);
8028 }
8029 } else {
8030 struct shash ofproto_shash;
8031 const struct shash_node **sorted_ofprotos;
8032 int i;
8033
8034 shash_init(&ofproto_shash);
8035 sorted_ofprotos = get_ofprotos(&ofproto_shash);
8036 for (i = 0; i < shash_count(&ofproto_shash); i++) {
8037 const struct shash_node *node = sorted_ofprotos[i];
8038 show_dp_format(node->data, &ds);
8039 }
8040
8041 shash_destroy(&ofproto_shash);
8042 free(sorted_ofprotos);
8043 }
8044
8045 unixctl_command_reply(conn, ds_cstr(&ds));
8046 ds_destroy(&ds);
8047}
8048
8049static void
8050ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
8051 int argc OVS_UNUSED, const char *argv[],
8052 void *aux OVS_UNUSED)
8053{
8054 struct ds ds = DS_EMPTY_INITIALIZER;
8055 const struct ofproto_dpif *ofproto;
8056 struct subfacet *subfacet;
8057
8058 ofproto = ofproto_dpif_lookup(argv[1]);
8059 if (!ofproto) {
8060 unixctl_command_reply_error(conn, "no such bridge");
8061 return;
8062 }
8063
af37354d
EJ
8064 update_stats(ofproto->backer);
8065
27022416
JP
8066 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
8067 struct odputil_keybuf keybuf;
8068 struct ofpbuf key;
8069
8070 subfacet_get_key(subfacet, &keybuf, &key);
8071 odp_flow_key_format(key.data, key.size, &ds);
8072
8073 ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
8074 subfacet->dp_packet_count, subfacet->dp_byte_count);
8075 if (subfacet->used) {
8076 ds_put_format(&ds, "%.3fs",
8077 (time_msec() - subfacet->used) / 1000.0);
8078 } else {
8079 ds_put_format(&ds, "never");
8080 }
8081 if (subfacet->facet->tcp_flags) {
8082 ds_put_cstr(&ds, ", flags:");
8083 packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
8084 }
8085
8086 ds_put_cstr(&ds, ", actions:");
8087 format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
8088 ds_put_char(&ds, '\n');
8089 }
8090
8091 unixctl_command_reply(conn, ds_cstr(&ds));
8092 ds_destroy(&ds);
8093}
8094
8095static void
8096ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
8097 int argc OVS_UNUSED, const char *argv[],
8098 void *aux OVS_UNUSED)
8099{
8100 struct ds ds = DS_EMPTY_INITIALIZER;
8101 struct ofproto_dpif *ofproto;
8102
8103 ofproto = ofproto_dpif_lookup(argv[1]);
8104 if (!ofproto) {
8105 unixctl_command_reply_error(conn, "no such bridge");
8106 return;
8107 }
8108
8109 flush(&ofproto->up);
8110
8111 unixctl_command_reply(conn, ds_cstr(&ds));
8112 ds_destroy(&ds);
8113}
8114
abe529af
BP
8115static void
8116ofproto_dpif_unixctl_init(void)
8117{
8118 static bool registered;
8119 if (registered) {
8120 return;
8121 }
8122 registered = true;
8123
0e15264f
BP
8124 unixctl_command_register(
8125 "ofproto/trace",
72e8bf28
AA
8126 "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
8127 2, 6, ofproto_unixctl_trace, NULL);
96e466a3 8128 unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
0e15264f
BP
8129 ofproto_unixctl_fdb_flush, NULL);
8130 unixctl_command_register("fdb/show", "bridge", 1, 1,
8131 ofproto_unixctl_fdb_show, NULL);
8132 unixctl_command_register("ofproto/clog", "", 0, 0,
8133 ofproto_dpif_clog, NULL);
8134 unixctl_command_register("ofproto/unclog", "", 0, 0,
8135 ofproto_dpif_unclog, NULL);
6814e51f
BP
8136 unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
8137 ofproto_dpif_self_check, NULL);
27022416
JP
8138 unixctl_command_register("dpif/dump-dps", "", 0, 0,
8139 ofproto_unixctl_dpif_dump_dps, NULL);
8140 unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
8141 ofproto_unixctl_dpif_show, NULL);
8142 unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
8143 ofproto_unixctl_dpif_dump_flows, NULL);
8144 unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
8145 ofproto_unixctl_dpif_del_flows, NULL);
abe529af
BP
8146}
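
/* Example invocations (a sketch; "br0" is a hypothetical bridge name, and
 * the argument forms follow the registrations above):
 *
 *     ovs-appctl ofproto/trace br0 \
 *         "in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07" \
 *         -generate
 *     ovs-appctl fdb/show br0
 *     ovs-appctl dpif/show
 *     ovs-appctl dpif/dump-flows br0
 */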
8147\f
52a90c29
BP
 8148/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
8149 *
8150 * This is deprecated. It is only for compatibility with broken device drivers
8151 * in old versions of Linux that do not properly support VLANs when VLAN
8152 * devices are not used. When broken device drivers are no longer in
8153 * widespread use, we will delete these interfaces. */
8154
8155static int
8156set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
8157{
8158 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
8159 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
8160
8161 if (realdev_ofp_port == ofport->realdev_ofp_port
8162 && vid == ofport->vlandev_vid) {
8163 return 0;
8164 }
8165
2cc3c58e 8166 ofproto->backer->need_revalidate = REV_RECONFIGURE;
52a90c29
BP
8167
8168 if (ofport->realdev_ofp_port) {
8169 vsp_remove(ofport);
8170 }
8171 if (realdev_ofp_port && ofport->bundle) {
8172 /* vlandevs are enslaved to their realdevs, so they are not allowed to
8173 * themselves be part of a bundle. */
8174 bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
8175 }
8176
8177 ofport->realdev_ofp_port = realdev_ofp_port;
8178 ofport->vlandev_vid = vid;
8179
8180 if (realdev_ofp_port) {
8181 vsp_add(ofport, realdev_ofp_port, vid);
8182 }
8183
8184 return 0;
8185}
8186
8187static uint32_t
8188hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
8189{
8190 return hash_2words(realdev_ofp_port, vid);
8191}

/* Returns the ODP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
 * 'ofproto'.  For example, given 'realdev_odp_port' of eth0 and 'vlan_tci'
 * 9, it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
 * function just returns its 'realdev_odp_port' argument. */
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port;
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}
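
/* Editorial sketch (not in the original source): on the output path this
 * translation happens just before a datapath output action is composed.
 * The helper below is hypothetical; nl_msg_put_u32() and
 * OVS_ACTION_ATTR_OUTPUT are the usual way ODP actions are emitted. */
static void
emit_odp_output(const struct ofproto_dpif *ofproto,
                struct ofpbuf *odp_actions,
                uint32_t odp_port, ovs_be16 vlan_tci)
{
    /* If a splinter maps (odp_port, vlan_tci) to a Linux VLAN device, the
     * kernel tags the packet itself, so output to the vlandev instead of
     * pushing an 802.1Q header here. */
    uint32_t out_port = vsp_realdev_to_vlandev(ofproto, odp_port, vlan_tci);
    nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
}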

static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}

/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this
 * function always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}

/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device
     * with the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
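
/* Editorial sketch (not in the original source): on the receive path,
 * vsp_adjust_flow() is called once after flow extraction so that the rest
 * of translation never sees the VLAN device.  The function name below is
 * invented for illustration. */
static void
adjust_received_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    if (vsp_adjust_flow(ofproto, flow)) {
        /* flow->in_port is now the real device, and flow->vlan_tci carries
         * the VID implied by the VLAN device the packet arrived on. */
    }
    /* ...continue with normal flow translation... */
}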

static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}

static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}

static uint32_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : OVSP_NONE;
}

static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
{
    struct ofport_dpif *port;

    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
                             hash_int(odp_port, 0),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }

    return NULL;
}

static uint16_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
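
/* Editorial note (not in the original source): several bridges can share
 * one dpif backer, so odp_port_to_ofport() resolves a datapath port to
 * whichever bridge owns it, and odp_port_to_ofp_port() additionally checks
 * that the owner is the bridge at hand.  An illustrative invariant
 * (assert() assumes <assert.h>): */
static void
check_port_round_trip(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    /* For a port that belongs to 'ofproto', translating
     * OpenFlow -> datapath -> OpenFlow is the identity. */
    uint32_t odp_port = ofp_port_to_odp_port(ofproto, ofp_port);
    if (odp_port != OVSP_NONE) {
        assert(odp_port_to_ofp_port(ofproto, odp_port) == ofp_port);
    }
}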

const struct ofproto_class ofproto_dpif_class = {
    init,
    enumerate_types,
    enumerate_names,
    del,
    port_open_type,
    type_run,
    type_run_fast,
    type_wait,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    run_fast,
    wait,
    get_memory_usage,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_cfm,
    get_cfm_fault,
    get_cfm_opup,
    get_cfm_remote_mpids,
    get_cfm_health,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set,
    mirror_get_stats,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,
    set_realdev,
};
8398 port_dump_next,
8399 port_dump_done,
8400 port_poll,
8401 port_poll_wait,
8402 port_is_lacp_current,
0ab6decf 8403 NULL, /* rule_choose_table */
abe529af
BP
8404 rule_alloc,
8405 rule_construct,
8406 rule_destruct,
8407 rule_dealloc,
abe529af
BP
8408 rule_get_stats,
8409 rule_execute,
8410 rule_modify_actions,
7257b535 8411 set_frag_handling,
abe529af
BP
8412 packet_out,
8413 set_netflow,
8414 get_netflow_ids,
8415 set_sflow,
8416 set_cfm,
a5610457 8417 get_cfm_fault,
1c0333b6 8418 get_cfm_opup,
1de11730 8419 get_cfm_remote_mpids,
3967a833 8420 get_cfm_health,
21f7563c
JP
8421 set_stp,
8422 get_stp_status,
8423 set_stp_port,
8424 get_stp_port_status,
8b36f51e 8425 set_queues,
abe529af
BP
8426 bundle_set,
8427 bundle_remove,
8428 mirror_set,
9d24de3b 8429 mirror_get_stats,
abe529af
BP
8430 set_flood_vlans,
8431 is_mirror_output_bundle,
8402c74b 8432 forward_bpdu_changed,
c4069512 8433 set_mac_table_config,
52a90c29 8434 set_realdev,
abe529af 8435};