/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation.
 *
 * The goal of limiting the depth of resubmits is to ensure that flow
 * translation eventually terminates.  Only resubmits to the same table or an
 * earlier table count against the maximum depth.  This is because resubmits to
 * strictly monotonically increasing table IDs will eventually terminate, since
 * any OpenFlow switch has a finite number of tables.  OpenFlow tables are most
 * commonly traversed in numerically increasing order, so this limit has little
 * effect on conventionally designed OpenFlow pipelines.
 *
 * Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)

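/* Illustrative sketch (not part of the upstream code): how 'depth' and
 * 'resubmits' differ.  Assume a hypothetical two-table pipeline:
 *
 *     table 0: actions=resubmit(,1)    <- higher table ID: counts against
 *                                         MAX_RESUBMITS only
 *     table 1: actions=resubmit(,0)    <- same-or-earlier table ID: counts
 *                                         against MAX_DEPTH and MAX_RESUBMITS
 *
 * The second rule forms a loop, so translation stops once the nesting depth
 * reaches MAX_DEPTH rather than running forever, while a long but strictly
 * forward chain of resubmits is stopped only by MAX_RESUBMITS. */
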
struct xbridge {
    struct hmap_node hmap_node;    /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto;  /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;      /* Owned xbundles. */
    struct hmap xports;            /* Indexed by ofp_port. */

    char *name;                    /* Name used in log messages. */
    struct dpif *dpif;             /* Datapath interface. */
    struct mac_learning *ml;       /* MAC learning handle. */
    struct mcast_snooping *ms;     /* Multicast snooping handle. */
    struct mbridge *mbridge;       /* Mirroring. */
    struct dpif_sflow *sflow;      /* sFlow handle, or null. */
    struct dpif_ipfix *ipfix;      /* IPFIX handle, or null. */
    struct netflow *netflow;       /* NetFlow handle, or null. */
    struct stp *stp;               /* STP or null if disabled. */
    struct rstp *rstp;             /* RSTP or null if disabled. */

    bool has_in_band;              /* Bridge has in band control? */
    bool forward_bpdu;             /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
    bool protected;                /* Protected port mode. */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    struct in6_addr orig_tunnel_ipv6_dst;

    /* Stack for the push and pop actions.  See comment above nx_stack_push()
     * in nx-match.c for info on how the stack is stored. */
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Flow translation populates this with wildcards relevant in translation.
     * When 'xin->wc' is nonnull, this is the same pointer.  When 'xin->wc' is
     * null, this is a pointer to a temporary buffer. */
    struct flow_wildcards *wc;

    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
     * this is the same pointer.  When 'xin->odp_actions' is null, this points
     * to a scratch ofpbuf.  This allows code to add actions to
     * 'ctx->odp_actions' without worrying about whether the caller really
     * wants actions. */
    struct ofpbuf *odp_actions;

    /* Statistics maintained by xlate_table_action().
     *
     * These statistics limit the amount of work that a single flow
     * translation can perform.  The goal of the first of these, 'depth', is
     * primarily to prevent translation from performing an infinite amount of
     * work.  It counts the current depth of nested "resubmit"s (and a few
     * other activities); when a resubmit returns, it decreases.  Resubmits to
     * tables in strictly monotonically increasing order don't contribute to
     * 'depth' because they cannot cause a flow translation to take an infinite
     * amount of time (because the number of tables is finite).  Translation
     * aborts when 'depth' exceeds MAX_DEPTH.
     *
     * 'resubmits', on the other hand, prevents flow translation from
     * performing an extraordinarily large, though still finite, amount of
     * work.  It counts the total number of resubmits (and a few other
     * activities) that have been executed.  Returning from a resubmit does not
     * affect this counter.  Thus, this limits the amount of work that a
     * particular translation can perform.  Translation aborts when 'resubmits'
     * exceeds MAX_RESUBMITS (which is much larger than MAX_DEPTH).
     */
    int depth;                  /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
    bool exit;                  /* No further actions should be processed. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
    int mirror_snaplen;         /* Max size of a mirror packet in bytes. */

    /* Freezing Translation
     * ====================
     *
     * At some point during translation, the code may recognize the need to
     * halt and checkpoint the translation in a way that it can be restarted
     * again later.  We call the checkpointing process "freezing" and the
     * restarting process "thawing".
     *
     * The use cases for freezing are:
     *
     *     - "Recirculation", where the translation process discovers that it
     *       doesn't have enough information to complete translation without
     *       actually executing the actions that have already been translated,
     *       which provides the additionally needed information.  In these
     *       situations, the translation process freezes translation and
     *       assigns the frozen data a unique "recirculation ID", which it
     *       associates with the data in a table in userspace (see
     *       ofproto-dpif-rid.h).  It also adds an OVS_ACTION_ATTR_RECIRC
     *       action specifying that ID to the datapath actions.  When a packet
     *       hits that action, the datapath looks its flow up again using the
     *       ID.  If there's a miss, it comes back to userspace, which finds
     *       the recirculation table entry for the ID, thaws the associated
     *       frozen data, and continues translation from that point given the
     *       additional information that is now known.
     *
     *       The archetypal example is MPLS.  As MPLS is implemented in
     *       OpenFlow, the protocol that follows the last MPLS label becomes
     *       known only when that label is popped by an OpenFlow action.  That
     *       means that Open vSwitch can't extract the headers beyond the MPLS
     *       labels until the pop action is executed.  Thus, at that point
     *       translation uses the recirculation process to extract the headers
     *       beyond the MPLS labels.
     *
     *       (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
     *       output to bonds.  OVS pre-populates all the datapath flows for
     *       bond output in the datapath, though, which means that the
     *       elaborate process of coming back to userspace for a second round
     *       of translation isn't needed, and so bonds don't follow the above
     *       process.)
     *
     *     - "Continuation".  A continuation is a way for an OpenFlow
     *       controller to interpose on a packet's traversal of the OpenFlow
     *       tables.  When the translation process encounters a "controller"
     *       action with the "pause" flag, it freezes translation, serializes
     *       the frozen data, and sends it to an OpenFlow controller.  The
     *       controller then examines and possibly modifies the frozen data
     *       and eventually sends it back to the switch, which thaws it and
     *       continues translation.
     *
     * The main problem of freezing translation is preserving state, so that
     * when the translation is thawed later it resumes from where it left off,
     * without disruption.  In particular, actions must be preserved as
     * follows:
     *
     *     - If we're freezing because an action needed more information, the
     *       action that prompted it.
     *
     *     - Any actions remaining to be translated within the current flow.
     *
     *     - If translation was frozen within a NXAST_RESUBMIT, then any
     *       actions following the resubmit action.  Resubmit actions can be
     *       nested, so this has to go all the way up the control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
     *
     *     - The table ID and cookie of the flow being translated at each
     *       level of the control stack, because these can become visible
     *       through OFPAT_CONTROLLER actions (and other ways).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need to freeze translation is identified, the
     * translation process:
     *
     * 1. Sets 'freezing' to true.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
     *    frozen_actions.header to the action to make it easy to find it
     *    later.  This action holds the current table ID and cookie so that
     *    they can be restored during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions
     *    following it within the same flow to 'frozen_actions', so that they
     *    can be executed during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these
     *    notices that we're exiting and freezing and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
     *    followed by any actions that were yet unprocessed.
     *
     * If we're freezing because of recirculation, the caller generates a
     * recirculation ID and associates all the state produced by this process
     * with it.  For post-recirculation upcall translation, the caller passes
     * it back in for the new translation to execute.  The process yielded a
     * set of ofpacts that can be translated directly, so it is not much of a
     * special case at that point.
     */
    bool freezing;
    bool recirc_update_dp_hash;    /* Generated recirculation will be preceded
                                    * by datapath HASH action to get an updated
                                    * dp_hash after recirculation. */
    uint32_t dp_hash_alg;
    uint32_t dp_hash_basis;
    struct ofpbuf frozen_actions;
    const struct ofpact_controller *pause;

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* True if conntrack has been performed on this packet during processing
     * on the current bridge.  This is used to determine whether conntrack
     * state from the datapath should be honored after thawing. */
    bool conntracked;

    /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
    struct ofpact_nat *ct_nat_action;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */

    enum xlate_error error;     /* Translation failed. */
};
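
/* Illustrative sketch (an assumption for exposition, not a verbatim excerpt
 * of the translation code below): what a freeze for recirculation looks like
 * in terms of the members above.  An action handler that discovers it needs
 * information only available after the datapath executes the actions so far
 * might do roughly:
 *
 *     ctx_trigger_freeze(ctx);     // sets ctx->freezing and ctx->exit
 *     // ...the remaining actions of the current flow are appended to
 *     // ctx->frozen_actions as the resubmit stack unwinds...
 *     // finish_freezing(ctx) then takes care of associating the frozen
 *     // state with a recirculation ID and emitting the RECIRC action.
 *
 * The exact call sequence lives in the action translation routines further
 * down in this file. */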

const char *xlate_strerror(enum xlate_error error)
{
    switch (error) {
    case XLATE_OK:
        return "OK";
    case XLATE_BRIDGE_NOT_FOUND:
        return "Bridge not found";
    case XLATE_RECURSION_TOO_DEEP:
        return "Recursion too deep";
    case XLATE_TOO_MANY_RESUBMITS:
        return "Too many resubmits";
    case XLATE_STACK_TOO_DEEP:
        return "Stack too deep";
    case XLATE_NO_RECIRCULATION_CONTEXT:
        return "No recirculation context";
    case XLATE_RECIRCULATION_CONFLICT:
        return "Recirculation conflict";
    case XLATE_TOO_MANY_MPLS_LABELS:
        return "Too many MPLS labels";
    case XLATE_INVALID_TUNNEL_METADATA:
        return "Invalid tunnel metadata";
    }
    return "Unknown error";
}
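
/* Example (illustrative only): a caller that holds an 'enum xlate_error err'
 * from a failed translation might log it as:
 *
 *     VLOG_WARN("translation failed (%s)", xlate_strerror(err));
 */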

static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);

static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->freezing = true;
}

static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
                                  uint32_t basis)
{
    ctx->exit = true;
    ctx->freezing = true;
    ctx->recirc_update_dp_hash = true;
    ctx->dp_hash_alg = type;
    ctx->dp_hash_basis = basis;
}
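
/* Illustrative sketch (hypothetical caller, for exposition only): a handler
 * that wants the datapath to compute a fresh dp_hash before the packet is
 * recirculated, e.g. for hash-based bucket selection, might call:
 *
 *     ctx_trigger_recirculate_with_hash(ctx, OVS_HASH_ALG_L4, 0);
 *
 * which freezes translation and records the hash algorithm and basis so that
 * a datapath HASH action can be emitted ahead of the RECIRC action. */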

static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
    return !ctx->frozen_actions.size;
}

static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
    if (ctx->freezing) {
        ctx->freezing = false;
        ctx->recirc_update_dp_hash = false;
        ofpbuf_clear(&ctx->frozen_actions);
        ctx->frozen_actions.header = NULL;
    }
}

static void finish_freezing(struct xlate_ctx *ctx);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;
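
/* Reader-side sketch (illustrative, assuming the RCU discipline described
 * above): a handler or revalidator thread takes a snapshot of the current
 * configuration and uses it without locking, e.g.:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * The snapshot stays valid until the thread quiesces, because the writer only
 * frees the old configuration after ovsrcu_synchronize() in
 * xlate_txn_commit(). */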

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss, bool with_ct_orig);
static bool input_vid_is_valid(const struct xlate_ctx *,
                               uint16_t vid, struct xbundle *);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t hash_alg;    /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
                                              const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode, int vlan,
                              unsigned long *trunks, bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

/* Tracing helpers. */

/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
 * the list of nodes maintained in ctx->xin.  The new node has type 'type' and
 * its text is created from 'format' by treating it as a printf format string.
 * Returns the list of nodes embedded within the new trace node; ordinarily,
 * the caller can ignore this, but it is useful if the caller needs to nest
 * more trace nodes within the new node.
 *
 * If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
             const char *format, ...)
{
    struct ovs_list *subtrace = NULL;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        va_list args;
        va_start(args, format);
        char *text = xvasprintf(format, args);
        subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
        va_end(args);
        free(text);
    }
    return subtrace;
}
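
/* Example (illustrative only; the real call sites appear throughout this
 * file): a typical trace message from the translation code looks like
 *
 *     xlate_report(ctx, OFT_DETAIL, "resubmitting to table %"PRIu8, table_id);
 *
 * which is a no-op unless the flow is being translated under ofproto/trace. */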
84f0f298 577
2d9b49dd
BP
578/* This is like xlate_report() for errors that are serious enough that we
579 * should log them even if we are not tracing. */
580static void OVS_PRINTF_FORMAT(2, 3)
581xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
582{
583 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
584 if (!OVS_UNLIKELY(ctx->xin->trace)
585 && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
586 return;
587 }
588
589 struct ds s = DS_EMPTY_INITIALIZER;
590 va_list args;
591 va_start(args, format);
592 ds_put_format_valist(&s, format, args);
593 va_end(args);
594
595 if (ctx->xin->trace) {
596 oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
597 } else {
598 ds_put_cstr(&s, " while processing ");
599 flow_format(&s, &ctx->base_flow);
600 ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
601 VLOG_WARN("%s", ds_cstr(&s));
602 }
603 ds_destroy(&s);
604}
605
606/* This is like xlate_report() for messages that should be logged at debug
607 * level (even if we are not tracing) because they can be valuable for
608 * debugging. */
609static void OVS_PRINTF_FORMAT(3, 4)
610xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
611 const char *format, ...)
612{
613 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
614 if (!OVS_UNLIKELY(ctx->xin->trace)
615 && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
616 return;
617 }
618
619 struct ds s = DS_EMPTY_INITIALIZER;
620 va_list args;
621 va_start(args, format);
622 ds_put_format_valist(&s, format, args);
623 va_end(args);
fff1b9c0 624
2d9b49dd
BP
625 if (ctx->xin->trace) {
626 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
627 } else {
628 VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
629 }
630 ds_destroy(&s);
631}
fff1b9c0 632
2d9b49dd
BP
633/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
634 * trace, whose text is 'title' followed by a formatted version of the
635 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
636 *
637 * If tracing is not enabled, does nothing. */
638static void
639xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
640 const char *title,
d6bef3cc
BP
641 const struct ofpact *ofpacts, size_t ofpacts_len)
642{
2d9b49dd 643 if (OVS_UNLIKELY(ctx->xin->trace)) {
d6bef3cc 644 struct ds s = DS_EMPTY_INITIALIZER;
2d9b49dd 645 ds_put_format(&s, "%s: ", title);
d6bef3cc 646 ofpacts_format(ofpacts, ofpacts_len, &s);
2d9b49dd 647 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
d6bef3cc
BP
648 ds_destroy(&s);
649 }
650}
651
2d9b49dd
BP
652/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
653 * trace, whose the message is a formatted version of the OpenFlow action set.
654 * 'verb' should be "was" or "is", depending on whether the action set reported
655 * is the new action set or the old one.
656 *
657 * If tracing is not enabled, does nothing. */
658static void
659xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
660{
661 if (OVS_UNLIKELY(ctx->xin->trace)) {
662 struct ofpbuf action_list;
663 ofpbuf_init(&action_list, 0);
664 ofpacts_execute_action_set(&action_list, &ctx->action_set);
665 if (action_list.size) {
666 struct ds s = DS_EMPTY_INITIALIZER;
667 ofpacts_format(action_list.data, action_list.size, &s);
668 xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
669 verb, ds_cstr(&s));
670 ds_destroy(&s);
671 } else {
672 xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
673 }
674 ofpbuf_uninit(&action_list);
675 }
676}
677
678
679/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
680 * OpenFlow table 'table_id') to the trace and makes this node the parent for
681 * future trace nodes. The caller should save ctx->xin->trace before calling
682 * this function, then after tracing all of the activities under the table,
683 * restore its previous value.
684 *
685 * If tracing is not enabled, does nothing. */
686static void
687xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
688 uint8_t table_id)
689{
690 if (OVS_LIKELY(!ctx->xin->trace)) {
691 return;
692 }
693
694 struct ds s = DS_EMPTY_INITIALIZER;
695 ds_put_format(&s, "%2d. ", table_id);
696 if (rule == ctx->xin->ofproto->miss_rule) {
697 ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
698 } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
699 ds_put_cstr(&s, "No match.");
700 } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
701 ds_put_cstr(&s, "Packets are IP fragments and "
702 "the fragment handling mode is \"drop\".");
703 } else {
704 minimatch_format(&rule->up.cr.match,
705 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
706 &s, OFP_DEFAULT_PRIORITY);
707 if (ds_last(&s) != ' ') {
708 ds_put_cstr(&s, ", ");
709 }
710 ds_put_format(&s, "priority %d", rule->up.cr.priority);
711 if (rule->up.flow_cookie) {
712 ds_put_format(&s, ", cookie %#"PRIx64,
713 ntohll(rule->up.flow_cookie));
714 }
715 }
716 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
717 ds_cstr(&s))->subs;
718 ds_destroy(&s);
719}
720
721/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
722 * reporting the value of subfield 'sf'.
723 *
724 * If tracing is not enabled, does nothing. */
725static void
726xlate_report_subfield(const struct xlate_ctx *ctx,
727 const struct mf_subfield *sf)
728{
729 if (OVS_UNLIKELY(ctx->xin->trace)) {
730 struct ds s = DS_EMPTY_INITIALIZER;
731 mf_format_subfield(sf, &s);
732 ds_put_cstr(&s, " is now ");
733
734 if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
735 union mf_value value;
736 mf_get_value(sf->field, &ctx->xin->flow, &value);
737 mf_format(sf->field, &value, NULL, &s);
738 } else {
739 union mf_subvalue cst;
740 mf_read_subfield(sf, &ctx->xin->flow, &cst);
741 ds_put_hex(&s, &cst, sizeof cst);
742 }
743
744 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
745
746 ds_destroy(&s);
747 }
748}
749\f
84f0f298
RW
750static void
751xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
752{
417e7e66 753 ovs_list_init(&xbridge->xbundles);
84f0f298
RW
754 hmap_init(&xbridge->xports);
755 hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
756 hash_pointer(xbridge->ofproto, 0));
757}
758
759static void
760xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
761{
417e7e66
BW
762 ovs_list_init(&xbundle->xports);
763 ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
84f0f298
RW
764 hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
765 hash_pointer(xbundle->ofbundle, 0));
766}
767
768static void
769xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
770{
771 hmap_init(&xport->skb_priorities);
772 hmap_insert(&xcfg->xports, &xport->hmap_node,
773 hash_pointer(xport->ofport, 0));
774 hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
775 hash_ofp_port(xport->ofp_port));
776}
777
778static void
779xlate_xbridge_set(struct xbridge *xbridge,
780 struct dpif *dpif,
ec89fc6f 781 const struct mac_learning *ml, struct stp *stp,
9efd308e 782 struct rstp *rstp, const struct mcast_snooping *ms,
ec89fc6f 783 const struct mbridge *mbridge,
46c88433 784 const struct dpif_sflow *sflow,
ce3955be 785 const struct dpif_ipfix *ipfix,
2f47cdf4 786 const struct netflow *netflow,
4b97b70d 787 bool forward_bpdu, bool has_in_band,
b440dd8c 788 const struct dpif_backer_support *support)
46c88433 789{
46c88433
EJ
790 if (xbridge->ml != ml) {
791 mac_learning_unref(xbridge->ml);
792 xbridge->ml = mac_learning_ref(ml);
793 }
794
6d95c4e8
FL
795 if (xbridge->ms != ms) {
796 mcast_snooping_unref(xbridge->ms);
797 xbridge->ms = mcast_snooping_ref(ms);
798 }
799
46c88433
EJ
800 if (xbridge->mbridge != mbridge) {
801 mbridge_unref(xbridge->mbridge);
802 xbridge->mbridge = mbridge_ref(mbridge);
803 }
804
805 if (xbridge->sflow != sflow) {
806 dpif_sflow_unref(xbridge->sflow);
807 xbridge->sflow = dpif_sflow_ref(sflow);
808 }
809
810 if (xbridge->ipfix != ipfix) {
811 dpif_ipfix_unref(xbridge->ipfix);
812 xbridge->ipfix = dpif_ipfix_ref(ipfix);
813 }
814
9d189a50
EJ
815 if (xbridge->stp != stp) {
816 stp_unref(xbridge->stp);
817 xbridge->stp = stp_ref(stp);
818 }
819
9efd308e
DV
820 if (xbridge->rstp != rstp) {
821 rstp_unref(xbridge->rstp);
822 xbridge->rstp = rstp_ref(rstp);
823 }
824
ce3955be
EJ
825 if (xbridge->netflow != netflow) {
826 netflow_unref(xbridge->netflow);
827 xbridge->netflow = netflow_ref(netflow);
828 }
829
89a8a7f0 830 xbridge->dpif = dpif;
46c88433
EJ
831 xbridge->forward_bpdu = forward_bpdu;
832 xbridge->has_in_band = has_in_band;
b440dd8c 833 xbridge->support = *support;
46c88433
EJ
834}
835
84f0f298
RW
836static void
837xlate_xbundle_set(struct xbundle *xbundle,
838 enum port_vlan_mode vlan_mode, int vlan,
839 unsigned long *trunks, bool use_priority_tags,
840 const struct bond *bond, const struct lacp *lacp,
c005f976 841 bool floodable, bool protected)
84f0f298
RW
842{
843 ovs_assert(xbundle->xbridge);
844
845 xbundle->vlan_mode = vlan_mode;
846 xbundle->vlan = vlan;
847 xbundle->trunks = trunks;
848 xbundle->use_priority_tags = use_priority_tags;
849 xbundle->floodable = floodable;
c005f976 850 xbundle->protected = protected;
84f0f298
RW
851
852 if (xbundle->bond != bond) {
853 bond_unref(xbundle->bond);
854 xbundle->bond = bond_ref(bond);
855 }
856
857 if (xbundle->lacp != lacp) {
858 lacp_unref(xbundle->lacp);
859 xbundle->lacp = lacp_ref(lacp);
860 }
861}
862
863static void
864xlate_xport_set(struct xport *xport, odp_port_t odp_port,
865 const struct netdev *netdev, const struct cfm *cfm,
0477baa9 866 const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
f025bcb7 867 const struct rstp_port* rstp_port,
84f0f298
RW
868 enum ofputil_port_config config, enum ofputil_port_state state,
869 bool is_tunnel, bool may_enable)
870{
871 xport->config = config;
872 xport->state = state;
873 xport->stp_port_no = stp_port_no;
874 xport->is_tunnel = is_tunnel;
875 xport->may_enable = may_enable;
876 xport->odp_port = odp_port;
877
f025bcb7
JR
878 if (xport->rstp_port != rstp_port) {
879 rstp_port_unref(xport->rstp_port);
880 xport->rstp_port = rstp_port_ref(rstp_port);
881 }
882
84f0f298
RW
883 if (xport->cfm != cfm) {
884 cfm_unref(xport->cfm);
885 xport->cfm = cfm_ref(cfm);
886 }
887
888 if (xport->bfd != bfd) {
889 bfd_unref(xport->bfd);
890 xport->bfd = bfd_ref(bfd);
891 }
892
0477baa9
DF
893 if (xport->lldp != lldp) {
894 lldp_unref(xport->lldp);
895 xport->lldp = lldp_ref(lldp);
896 }
897
84f0f298
RW
898 if (xport->netdev != netdev) {
899 netdev_close(xport->netdev);
900 xport->netdev = netdev_ref(netdev);
901 }
902}
903
904static void
905xlate_xbridge_copy(struct xbridge *xbridge)
906{
907 struct xbundle *xbundle;
908 struct xport *xport;
909 struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
910 new_xbridge->ofproto = xbridge->ofproto;
911 new_xbridge->name = xstrdup(xbridge->name);
912 xlate_xbridge_init(new_xcfg, new_xbridge);
913
914 xlate_xbridge_set(new_xbridge,
34dd0d78 915 xbridge->dpif, xbridge->ml, xbridge->stp,
9efd308e
DV
916 xbridge->rstp, xbridge->ms, xbridge->mbridge,
917 xbridge->sflow, xbridge->ipfix, xbridge->netflow,
b440dd8c
JS
918 xbridge->forward_bpdu, xbridge->has_in_band,
919 &xbridge->support);
84f0f298
RW
920 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
921 xlate_xbundle_copy(new_xbridge, xbundle);
922 }
923
924 /* Copy xports which are not part of a xbundle */
925 HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
926 if (!xport->xbundle) {
927 xlate_xport_copy(new_xbridge, NULL, xport);
928 }
929 }
930}
931
932static void
933xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
934{
935 struct xport *xport;
936 struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
937 new_xbundle->ofbundle = xbundle->ofbundle;
938 new_xbundle->xbridge = xbridge;
939 new_xbundle->name = xstrdup(xbundle->name);
940 xlate_xbundle_init(new_xcfg, new_xbundle);
941
942 xlate_xbundle_set(new_xbundle, xbundle->vlan_mode,
943 xbundle->vlan, xbundle->trunks,
944 xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
c005f976 945 xbundle->floodable, xbundle->protected);
84f0f298
RW
946 LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
947 xlate_xport_copy(xbridge, new_xbundle, xport);
948 }
949}
950
951static void
952xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
953 struct xport *xport)
954{
955 struct skb_priority_to_dscp *pdscp, *new_pdscp;
956 struct xport *new_xport = xzalloc(sizeof *xport);
957 new_xport->ofport = xport->ofport;
958 new_xport->ofp_port = xport->ofp_port;
959 new_xport->xbridge = xbridge;
960 xlate_xport_init(new_xcfg, new_xport);
961
962 xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
0477baa9
DF
963 xport->bfd, xport->lldp, xport->stp_port_no,
964 xport->rstp_port, xport->config, xport->state,
965 xport->is_tunnel, xport->may_enable);
84f0f298
RW
966
967 if (xport->peer) {
968 struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
969 if (peer) {
970 new_xport->peer = peer;
971 new_xport->peer->peer = new_xport;
972 }
973 }
974
975 if (xbundle) {
976 new_xport->xbundle = xbundle;
417e7e66 977 ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
84f0f298
RW
978 }
979
980 HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
981 new_pdscp = xmalloc(sizeof *pdscp);
982 new_pdscp->skb_priority = pdscp->skb_priority;
983 new_pdscp->dscp = pdscp->dscp;
984 hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
985 hash_int(new_pdscp->skb_priority, 0));
986 }
987}
988
989/* Sets the current xlate configuration to new_xcfg and frees the old xlate
990 * configuration in xcfgp.
991 *
992 * This needs to be called after editing the xlate configuration.
993 *
994 * Functions that edit the new xlate configuration are
6cd20a22 995 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
84f0f298
RW
996 *
997 * A sample workflow:
998 *
999 * xlate_txn_start();
1000 * ...
1001 * edit_xlate_configuration();
1002 * ...
1003 * xlate_txn_commit(); */
46c88433 1004void
84f0f298
RW
1005xlate_txn_commit(void)
1006{
1007 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1008
1009 ovsrcu_set(&xcfgp, new_xcfg);
40a9c4c2
AW
1010 ovsrcu_synchronize();
1011 xlate_xcfg_free(xcfg);
84f0f298
RW
1012 new_xcfg = NULL;
1013}
1014
1015/* Copies the current xlate configuration in xcfgp to new_xcfg.
1016 *
1017 * This needs to be called prior to editing the xlate configuration. */
1018void
1019xlate_txn_start(void)
1020{
1021 struct xbridge *xbridge;
1022 struct xlate_cfg *xcfg;
1023
1024 ovs_assert(!new_xcfg);
1025
1026 new_xcfg = xmalloc(sizeof *new_xcfg);
1027 hmap_init(&new_xcfg->xbridges);
1028 hmap_init(&new_xcfg->xbundles);
1029 hmap_init(&new_xcfg->xports);
1030
1031 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1032 if (!xcfg) {
1033 return;
1034 }
1035
1036 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1037 xlate_xbridge_copy(xbridge);
1038 }
1039}
1040
1041
1042static void
1043xlate_xcfg_free(struct xlate_cfg *xcfg)
1044{
1045 struct xbridge *xbridge, *next_xbridge;
1046
1047 if (!xcfg) {
1048 return;
1049 }
1050
1051 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1052 xlate_xbridge_remove(xcfg, xbridge);
1053 }
1054
1055 hmap_destroy(&xcfg->xbridges);
1056 hmap_destroy(&xcfg->xbundles);
1057 hmap_destroy(&xcfg->xports);
1058 free(xcfg);
1059}
1060
1061void
1062xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
34dd0d78 1063 struct dpif *dpif,
84f0f298 1064 const struct mac_learning *ml, struct stp *stp,
9efd308e 1065 struct rstp *rstp, const struct mcast_snooping *ms,
84f0f298
RW
1066 const struct mbridge *mbridge,
1067 const struct dpif_sflow *sflow,
1068 const struct dpif_ipfix *ipfix,
2f47cdf4 1069 const struct netflow *netflow,
b440dd8c
JS
1070 bool forward_bpdu, bool has_in_band,
1071 const struct dpif_backer_support *support)
84f0f298
RW
1072{
1073 struct xbridge *xbridge;
1074
1075 ovs_assert(new_xcfg);
1076
1077 xbridge = xbridge_lookup(new_xcfg, ofproto);
1078 if (!xbridge) {
1079 xbridge = xzalloc(sizeof *xbridge);
1080 xbridge->ofproto = ofproto;
1081
1082 xlate_xbridge_init(new_xcfg, xbridge);
1083 }
1084
1085 free(xbridge->name);
1086 xbridge->name = xstrdup(name);
1087
34dd0d78 1088 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
b440dd8c 1089 netflow, forward_bpdu, has_in_band, support);
84f0f298
RW
1090}
1091
1092static void
1093xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
46c88433 1094{
46c88433
EJ
1095 struct xbundle *xbundle, *next_xbundle;
1096 struct xport *xport, *next_xport;
1097
1098 if (!xbridge) {
1099 return;
1100 }
1101
1102 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
84f0f298 1103 xlate_xport_remove(xcfg, xport);
46c88433
EJ
1104 }
1105
1106 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
84f0f298 1107 xlate_xbundle_remove(xcfg, xbundle);
46c88433
EJ
1108 }
1109
84f0f298 1110 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
795cc5c1 1111 mac_learning_unref(xbridge->ml);
6d95c4e8 1112 mcast_snooping_unref(xbridge->ms);
795cc5c1
EJ
1113 mbridge_unref(xbridge->mbridge);
1114 dpif_sflow_unref(xbridge->sflow);
1115 dpif_ipfix_unref(xbridge->ipfix);
1116 stp_unref(xbridge->stp);
9efd308e 1117 rstp_unref(xbridge->rstp);
795cc5c1 1118 hmap_destroy(&xbridge->xports);
46c88433
EJ
1119 free(xbridge->name);
1120 free(xbridge);
1121}
1122
84f0f298
RW
1123void
1124xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1125{
1126 struct xbridge *xbridge;
1127
1128 ovs_assert(new_xcfg);
1129
1130 xbridge = xbridge_lookup(new_xcfg, ofproto);
1131 xlate_xbridge_remove(new_xcfg, xbridge);
1132}
1133
46c88433
EJ
1134void
1135xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1136 const char *name, enum port_vlan_mode vlan_mode, int vlan,
1137 unsigned long *trunks, bool use_priority_tags,
1138 const struct bond *bond, const struct lacp *lacp,
c005f976 1139 bool floodable, bool protected)
46c88433 1140{
84f0f298 1141 struct xbundle *xbundle;
46c88433 1142
84f0f298
RW
1143 ovs_assert(new_xcfg);
1144
1145 xbundle = xbundle_lookup(new_xcfg, ofbundle);
46c88433
EJ
1146 if (!xbundle) {
1147 xbundle = xzalloc(sizeof *xbundle);
1148 xbundle->ofbundle = ofbundle;
84f0f298 1149 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
46c88433 1150
84f0f298 1151 xlate_xbundle_init(new_xcfg, xbundle);
46c88433
EJ
1152 }
1153
46c88433
EJ
1154 free(xbundle->name);
1155 xbundle->name = xstrdup(name);
1156
84f0f298 1157 xlate_xbundle_set(xbundle, vlan_mode, vlan, trunks,
c005f976 1158 use_priority_tags, bond, lacp, floodable, protected);
46c88433
EJ
1159}
1160
84f0f298
RW
1161static void
1162xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
46c88433 1163{
5f03c983 1164 struct xport *xport;
46c88433
EJ
1165
1166 if (!xbundle) {
1167 return;
1168 }
1169
5f03c983 1170 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
46c88433
EJ
1171 xport->xbundle = NULL;
1172 }
1173
84f0f298 1174 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
417e7e66 1175 ovs_list_remove(&xbundle->list_node);
46c88433
EJ
1176 bond_unref(xbundle->bond);
1177 lacp_unref(xbundle->lacp);
1178 free(xbundle->name);
1179 free(xbundle);
1180}
1181
84f0f298
RW
1182void
1183xlate_bundle_remove(struct ofbundle *ofbundle)
1184{
1185 struct xbundle *xbundle;
1186
1187 ovs_assert(new_xcfg);
1188
1189 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1190 xlate_xbundle_remove(new_xcfg, xbundle);
1191}
1192
46c88433
EJ
1193void
1194xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1195 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1196 odp_port_t odp_port, const struct netdev *netdev,
1197 const struct cfm *cfm, const struct bfd *bfd,
0477baa9
DF
1198 const struct lldp *lldp, struct ofport_dpif *peer,
1199 int stp_port_no, const struct rstp_port *rstp_port,
55954f6e 1200 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
dd8cd4b4
SH
1201 enum ofputil_port_config config,
1202 enum ofputil_port_state state, bool is_tunnel,
9d189a50 1203 bool may_enable)
46c88433 1204{
55954f6e 1205 size_t i;
84f0f298
RW
1206 struct xport *xport;
1207
1208 ovs_assert(new_xcfg);
46c88433 1209
84f0f298 1210 xport = xport_lookup(new_xcfg, ofport);
46c88433
EJ
1211 if (!xport) {
1212 xport = xzalloc(sizeof *xport);
1213 xport->ofport = ofport;
84f0f298 1214 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
46c88433
EJ
1215 xport->ofp_port = ofp_port;
1216
84f0f298 1217 xlate_xport_init(new_xcfg, xport);
46c88433
EJ
1218 }
1219
1220 ovs_assert(xport->ofp_port == ofp_port);
1221
0477baa9
DF
1222 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1223 stp_port_no, rstp_port, config, state, is_tunnel,
1224 may_enable);
46c88433
EJ
1225
1226 if (xport->peer) {
1227 xport->peer->peer = NULL;
1228 }
84f0f298 1229 xport->peer = xport_lookup(new_xcfg, peer);
46c88433
EJ
1230 if (xport->peer) {
1231 xport->peer->peer = xport;
1232 }
1233
1234 if (xport->xbundle) {
417e7e66 1235 ovs_list_remove(&xport->bundle_node);
46c88433 1236 }
84f0f298 1237 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
46c88433 1238 if (xport->xbundle) {
417e7e66 1239 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
46c88433 1240 }
55954f6e
EJ
1241
1242 clear_skb_priorities(xport);
1243 for (i = 0; i < n_qdscp; i++) {
1244 struct skb_priority_to_dscp *pdscp;
1245 uint32_t skb_priority;
1246
89a8a7f0
EJ
1247 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1248 &skb_priority)) {
55954f6e
EJ
1249 continue;
1250 }
1251
1252 pdscp = xmalloc(sizeof *pdscp);
1253 pdscp->skb_priority = skb_priority;
1254 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1255 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1256 hash_int(pdscp->skb_priority, 0));
1257 }
46c88433
EJ
1258}
1259
84f0f298
RW
1260static void
1261xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
46c88433 1262{
46c88433
EJ
1263 if (!xport) {
1264 return;
1265 }
1266
1267 if (xport->peer) {
1268 xport->peer->peer = NULL;
1269 xport->peer = NULL;
1270 }
1271
e621a12d 1272 if (xport->xbundle) {
417e7e66 1273 ovs_list_remove(&xport->bundle_node);
e621a12d
EJ
1274 }
1275
55954f6e
EJ
1276 clear_skb_priorities(xport);
1277 hmap_destroy(&xport->skb_priorities);
1278
84f0f298 1279 hmap_remove(&xcfg->xports, &xport->hmap_node);
46c88433
EJ
1280 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1281
1282 netdev_close(xport->netdev);
f025bcb7 1283 rstp_port_unref(xport->rstp_port);
46c88433
EJ
1284 cfm_unref(xport->cfm);
1285 bfd_unref(xport->bfd);
0477baa9 1286 lldp_unref(xport->lldp);
46c88433
EJ
1287 free(xport);
1288}
1289
84f0f298
RW
1290void
1291xlate_ofport_remove(struct ofport_dpif *ofport)
1292{
1293 struct xport *xport;
1294
1295 ovs_assert(new_xcfg);
1296
1297 xport = xport_lookup(new_xcfg, ofport);
1298 xlate_xport_remove(new_xcfg, xport);
1299}
1300
ef377a58
JR
1301static struct ofproto_dpif *
1302xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
1303 ofp_port_t *ofp_in_port, const struct xport **xportp)
1304{
e672ff9b 1305 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
ef377a58 1306 const struct xport *xport;
f9038ef6 1307
e672ff9b
JR
1308 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1309 ? tnl_port_receive(flow)
1310 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1311 if (OVS_UNLIKELY(!xport)) {
1312 return NULL;
ef377a58 1313 }
e672ff9b 1314 *xportp = xport;
f9038ef6 1315 if (ofp_in_port) {
e672ff9b 1316 *ofp_in_port = xport->ofp_port;
f9038ef6 1317 }
e672ff9b 1318 return xport->xbridge->ofproto;
ef377a58
JR
1319}
1320
1321/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
1322 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1323struct ofproto_dpif *
1324xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1325 ofp_port_t *ofp_in_port)
1326{
1327 const struct xport *xport;
1328
1329 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1330}
1331
cc377352 1332/* Given a datapath and flow metadata ('backer', and 'flow' respectively),
ef377a58 1333 * optionally populates 'ofproto' with the ofproto_dpif, 'ofp_in_port' with the
cc377352 1334 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
dcc2c6cd
JR
1335 * handles for those protocols if they're enabled. Caller may use the returned
1336 * pointers until quiescing, for longer term use additional references must
1337 * be taken.
8449c4d6 1338 *
f9038ef6 1339 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
ef377a58 1340 */
8449c4d6 1341int
5c476ea3
JR
1342xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1343 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1344 struct dpif_sflow **sflow, struct netflow **netflow,
1345 ofp_port_t *ofp_in_port)
8449c4d6 1346{
ef377a58 1347 struct ofproto_dpif *ofproto;
84f0f298 1348 const struct xport *xport;
8449c4d6 1349
ef377a58 1350 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
8449c4d6 1351
f9038ef6 1352 if (!ofproto) {
cc377352 1353 return ENODEV;
8449c4d6 1354 }
8449c4d6 1355
ef377a58
JR
1356 if (ofprotop) {
1357 *ofprotop = ofproto;
8449c4d6
EJ
1358 }
1359
1dfdb9b3 1360 if (ipfix) {
f9038ef6 1361 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1dfdb9b3
EJ
1362 }
1363
1364 if (sflow) {
f9038ef6 1365 *sflow = xport ? xport->xbridge->sflow : NULL;
1dfdb9b3
EJ
1366 }
1367
1368 if (netflow) {
f9038ef6 1369 *netflow = xport ? xport->xbridge->netflow : NULL;
1dfdb9b3 1370 }
f9038ef6 1371
cc377352 1372 return 0;
8449c4d6
EJ
1373}
1374
46c88433 1375static struct xbridge *
84f0f298 1376xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
46c88433 1377{
84f0f298 1378 struct hmap *xbridges;
46c88433
EJ
1379 struct xbridge *xbridge;
1380
84f0f298 1381 if (!ofproto || !xcfg) {
5e6af486
EJ
1382 return NULL;
1383 }
1384
84f0f298
RW
1385 xbridges = &xcfg->xbridges;
1386
46c88433 1387 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
84f0f298 1388 xbridges) {
46c88433
EJ
1389 if (xbridge->ofproto == ofproto) {
1390 return xbridge;
1391 }
1392 }
1393 return NULL;
1394}
1395
290835f9
BP
1396static struct xbridge *
1397xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1398{
1399 struct xbridge *xbridge;
1400
1401 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
07a3cd5c 1402 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
290835f9
BP
1403 return xbridge;
1404 }
1405 }
1406 return NULL;
1407}
1408
46c88433 1409static struct xbundle *
84f0f298 1410xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
46c88433 1411{
84f0f298 1412 struct hmap *xbundles;
46c88433
EJ
1413 struct xbundle *xbundle;
1414
84f0f298 1415 if (!ofbundle || !xcfg) {
5e6af486
EJ
1416 return NULL;
1417 }
1418
84f0f298
RW
1419 xbundles = &xcfg->xbundles;
1420
46c88433 1421 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
84f0f298 1422 xbundles) {
46c88433
EJ
1423 if (xbundle->ofbundle == ofbundle) {
1424 return xbundle;
1425 }
1426 }
1427 return NULL;
1428}
1429
1430static struct xport *
84f0f298 1431xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
46c88433 1432{
84f0f298 1433 struct hmap *xports;
46c88433
EJ
1434 struct xport *xport;
1435
84f0f298 1436 if (!ofport || !xcfg) {
5e6af486
EJ
1437 return NULL;
1438 }
1439
84f0f298
RW
1440 xports = &xcfg->xports;
1441
46c88433 1442 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
84f0f298 1443 xports) {
46c88433
EJ
1444 if (xport->ofport == ofport) {
1445 return xport;
1446 }
1447 }
1448 return NULL;
1449}
1450
40085e56
EJ
1451static struct stp_port *
1452xport_get_stp_port(const struct xport *xport)
1453{
92cf817b 1454 return xport->xbridge->stp && xport->stp_port_no != -1
40085e56
EJ
1455 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1456 : NULL;
1457}
9d189a50 1458
0d1cee12 1459static bool
9d189a50
EJ
1460xport_stp_learn_state(const struct xport *xport)
1461{
40085e56 1462 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1463 return sp
1464 ? stp_learn_in_state(stp_port_get_state(sp))
1465 : true;
9d189a50
EJ
1466}
1467
1468static bool
1469xport_stp_forward_state(const struct xport *xport)
1470{
40085e56 1471 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1472 return sp
1473 ? stp_forward_in_state(stp_port_get_state(sp))
1474 : true;
9d189a50
EJ
1475}
1476
0d1cee12 1477static bool
bacdb85a 1478xport_stp_should_forward_bpdu(const struct xport *xport)
0d1cee12
K
1479{
1480 struct stp_port *sp = xport_get_stp_port(xport);
bacdb85a 1481 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
0d1cee12
K
1482}
1483
9d189a50
EJ
1484/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1485 * were used to make the determination.*/
1486static bool
1487stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1488{
bbbca389 1489 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
9d189a50 1490 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
bbbca389 1491 return is_stp(flow);
9d189a50
EJ
1492}
1493
1494static void
cf62fa4c 1495stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9d189a50 1496{
40085e56 1497 struct stp_port *sp = xport_get_stp_port(xport);
cf62fa4c
PS
1498 struct dp_packet payload = *packet;
1499 struct eth_header *eth = dp_packet_data(&payload);
9d189a50
EJ
1500
1501 /* Sink packets on ports that have STP disabled when the bridge has
1502 * STP enabled. */
1503 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1504 return;
1505 }
1506
1507 /* Trim off padding on payload. */
cf62fa4c
PS
1508 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1509 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9d189a50
EJ
1510 }
1511
cf62fa4c
PS
1512 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1513 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
9d189a50
EJ
1514 }
1515}
1516
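/* Worked example for the trimming above (illustrative, not part of the
 * original file): a classic 802.1D configuration BPDU carries an 802.3
 * length field of 38 in eth_type (3 bytes of LLC plus a 35-byte BPDU), but
 * the frame arrives padded to the 60-byte Ethernet minimum.  Trimming to
 * ntohs(eth->eth_type) + ETH_HEADER_LEN = 52 bytes ensures that
 * stp_received_bpdu() never parses padding as BPDU content. */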
f025bcb7
JR
1517static enum rstp_state
1518xport_get_rstp_port_state(const struct xport *xport)
9efd308e 1519{
f025bcb7
JR
1520 return xport->rstp_port
1521 ? rstp_port_get_state(xport->rstp_port)
1522 : RSTP_DISABLED;
9efd308e
DV
1523}
1524
1525static bool
1526xport_rstp_learn_state(const struct xport *xport)
1527{
4b5f1996
DV
1528 return xport->xbridge->rstp && xport->rstp_port
1529 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1530 : true;
9efd308e
DV
1531}
1532
1533static bool
1534xport_rstp_forward_state(const struct xport *xport)
1535{
4b5f1996
DV
1536 return xport->xbridge->rstp && xport->rstp_port
1537 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1538 : true;
9efd308e
DV
1539}
1540
1541static bool
1542xport_rstp_should_manage_bpdu(const struct xport *xport)
1543{
f025bcb7 1544 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
9efd308e
DV
1545}
1546
1547static void
cf62fa4c 1548rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9efd308e 1549{
cf62fa4c
PS
1550 struct dp_packet payload = *packet;
1551 struct eth_header *eth = dp_packet_data(&payload);
9efd308e 1552
f025bcb7
JR
1553 /* Sink packets on ports that have no RSTP. */
1554 if (!xport->rstp_port) {
9efd308e
DV
1555 return;
1556 }
1557
1558 /* Trim off padding on payload. */
cf62fa4c
PS
1559 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1560 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9efd308e
DV
1561 }
1562
cf62fa4c
PS
1563 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1564 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1565 dp_packet_size(&payload));
9efd308e
DV
1566 }
1567}
1568
46c88433
EJ
1569static struct xport *
1570get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1571{
1572 struct xport *xport;
1573
1574 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1575 &xbridge->xports) {
1576 if (xport->ofp_port == ofp_port) {
1577 return xport;
1578 }
1579 }
1580 return NULL;
1581}
1582
1583static odp_port_t
1584ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1585{
1586 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1587 return xport ? xport->odp_port : ODPP_NONE;
1588}
1589
dd8cd4b4
SH
1590static bool
1591odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1592{
086fa873
BP
1593 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1594 return xport && xport->may_enable;
dd8cd4b4
SH
1595}
1596
1e684d7d 1597static struct ofputil_bucket *
dd8cd4b4
SH
1598group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1599 int depth);
1600
1601static bool
1602group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1603{
1604 struct group_dpif *group;
dd8cd4b4 1605
5d08a275 1606 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 1607 ctx->xin->tables_version, false);
db88b35c 1608 if (group) {
76973237 1609 return group_first_live_bucket(ctx, group, depth) != NULL;
dc25893e 1610 }
dd8cd4b4 1611
dc25893e 1612 return false;
dd8cd4b4
SH
1613}
1614
1615#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1616
1617static bool
1618bucket_is_alive(const struct xlate_ctx *ctx,
1e684d7d 1619 struct ofputil_bucket *bucket, int depth)
dd8cd4b4
SH
1620{
1621 if (depth >= MAX_LIVENESS_RECURSION) {
2d9b49dd
BP
1622 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1623 MAX_LIVENESS_RECURSION);
dd8cd4b4
SH
1624 return false;
1625 }
1626
fdb1999b
AZ
1627 return (!ofputil_bucket_has_liveness(bucket)
1628 || (bucket->watch_port != OFPP_ANY
1629 && odp_port_is_alive(ctx, bucket->watch_port))
1630 || (bucket->watch_group != OFPG_ANY
1631 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
dd8cd4b4
SH
1632}
1633
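/* Example of the liveness rules above (illustrative, not part of the
 * original file): a bucket that watches port 5 is alive exactly when that
 * port is up ('may_enable'); a bucket that watches another group recurses
 * through group_is_alive(), bounded by MAX_LIVENESS_RECURSION; a bucket
 * with neither a watch_port nor a watch_group has no liveness requirement
 * and is always considered alive. */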
1e684d7d 1634static struct ofputil_bucket *
dd8cd4b4
SH
1635group_first_live_bucket(const struct xlate_ctx *ctx,
1636 const struct group_dpif *group, int depth)
1637{
1638 struct ofputil_bucket *bucket;
07a3cd5c 1639 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
dd8cd4b4
SH
1640 if (bucket_is_alive(ctx, bucket, depth)) {
1641 return bucket;
1642 }
1643 }
1644
1645 return NULL;
1646}
1647
1e684d7d 1648static struct ofputil_bucket *
fe7e5749
SH
1649group_best_live_bucket(const struct xlate_ctx *ctx,
1650 const struct group_dpif *group,
1651 uint32_t basis)
1652{
1e684d7d 1653 struct ofputil_bucket *best_bucket = NULL;
fe7e5749 1654 uint32_t best_score = 0;
fe7e5749 1655
1e684d7d 1656 struct ofputil_bucket *bucket;
07a3cd5c 1657 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
fe7e5749 1658 if (bucket_is_alive(ctx, bucket, 0)) {
c09cb861
LS
1659 uint32_t score =
1660 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
fe7e5749
SH
1661 if (score >= best_score) {
1662 best_bucket = bucket;
1663 best_score = score;
1664 }
1665 }
fe7e5749
SH
1666 }
1667
1668 return best_bucket;
1669}
1670
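/* A minimal sketch (not part of the original file) of how a select-group
 * caller could use group_best_live_bucket().  The zero 'basis' is a
 * placeholder; real callers derive it from the hashed flow fields
 * configured on the group. */
static struct ofputil_bucket *
example_select_bucket(const struct xlate_ctx *ctx,
                      const struct group_dpif *group)
{
    uint32_t basis = 0;     /* Placeholder flow hash. */
    struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);

    /* 'bucket' is now the live bucket with the highest weighted hash score,
     * or NULL if every bucket in the group is dead. */
    return bucket;
}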
9583bc14 1671static bool
46c88433 1672xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
9583bc14
EJ
1673{
1674 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1675 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1676}
1677
1678static bool
46c88433
EJ
1679xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
1680{
1681 return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
1682}
1683
1684static mirror_mask_t
1685xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1686{
1687 return xbundle != &ofpp_none_bundle
1688 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
1689 : 0;
1690}
1691
1692static mirror_mask_t
1693xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 1694{
46c88433
EJ
1695 return xbundle != &ofpp_none_bundle
1696 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
1697 : 0;
9583bc14
EJ
1698}
1699
46c88433
EJ
1700static mirror_mask_t
1701xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 1702{
46c88433
EJ
1703 return xbundle != &ofpp_none_bundle
1704 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
1705 : 0;
1706}
1707
1708static struct xbundle *
2d9b49dd
BP
1709lookup_input_bundle__(const struct xbridge *xbridge,
1710 ofp_port_t in_port, struct xport **in_xportp)
46c88433
EJ
1711{
1712 struct xport *xport;
9583bc14
EJ
1713
1714 /* Find the port and bundle for the received packet. */
46c88433
EJ
1715 xport = get_ofp_port(xbridge, in_port);
1716 if (in_xportp) {
1717 *in_xportp = xport;
9583bc14 1718 }
46c88433
EJ
1719 if (xport && xport->xbundle) {
1720 return xport->xbundle;
9583bc14
EJ
1721 }
1722
6362203b
YT
1723 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
1724 * which a controller may use as the ingress port for traffic that
1725 * it is sourcing. */
1726 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
9583bc14
EJ
1727 return &ofpp_none_bundle;
1728 }
2d9b49dd
BP
1729 return NULL;
1730}
9583bc14 1731
2d9b49dd
BP
1732static struct xbundle *
1733lookup_input_bundle(const struct xlate_ctx *ctx,
1734 ofp_port_t in_port, struct xport **in_xportp)
1735{
1736 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
1737 in_port, in_xportp);
1738 if (!xbundle) {
1739 /* Odd. A few possible reasons here:
1740 *
1741 * - We deleted a port but there are still a few packets queued up
1742 * from it.
1743 *
1744 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
1745 * we don't know about.
1746 *
1747 * - The ofproto client didn't configure the port as part of a bundle.
1748 * This is particularly likely to happen if a packet was received on
1749 * the port after it was created, but before the client had a chance
1750 * to configure its bundle.
1751 */
94783c7c 1752 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2d9b49dd 1753 in_port);
9583bc14 1754 }
2d9b49dd 1755 return xbundle;
9583bc14
EJ
1756}
1757
faa624b4
BP
1758/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
1759 * given the packet is ingressing or egressing on 'xbundle', which has ingress
1760 * or egress (as appropriate) mirrors 'mirrors'. */
9583bc14 1761static void
7efbc3b7
BP
1762mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
1763 mirror_mask_t mirrors)
9583bc14 1764{
faa624b4
BP
1765 /* Figure out what VLAN the packet is in (because mirrors can select
1766 * packets on basis of VLAN). */
7efbc3b7 1767 uint16_t vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
2d9b49dd 1768 if (!input_vid_is_valid(ctx, vid, xbundle)) {
9583bc14
EJ
1769 return;
1770 }
7efbc3b7 1771 uint16_t vlan = input_vid_to_vlan(xbundle, vid);
9583bc14 1772
7efbc3b7 1773 const struct xbridge *xbridge = ctx->xbridge;
9583bc14 1774
7efbc3b7
BP
1775 /* Don't mirror to destinations that we've already mirrored to. */
1776 mirrors &= ~ctx->mirrors;
9583bc14
EJ
1777 if (!mirrors) {
1778 return;
1779 }
1780
7efbc3b7
BP
1781 if (ctx->xin->resubmit_stats) {
1782 mirror_update_stats(xbridge->mbridge, mirrors,
1783 ctx->xin->resubmit_stats->n_packets,
1784 ctx->xin->resubmit_stats->n_bytes);
1785 }
1786 if (ctx->xin->xcache) {
1787 struct xc_entry *entry;
1788
1789 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
901a517e
JR
1790 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
1791 entry->mirror.mirrors = mirrors;
7efbc3b7 1792 }
9583bc14 1793
faa624b4
BP
1794 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate as long as
1795 * some candidates remain. */
9583bc14 1796 while (mirrors) {
7efbc3b7 1797 const unsigned long *vlans;
ec7ceaed
EJ
1798 mirror_mask_t dup_mirrors;
1799 struct ofbundle *out;
ec7ceaed 1800 int out_vlan;
1356dbd1 1801 int snaplen;
ec7ceaed 1802
faa624b4 1803 /* Get the details of the mirror represented by the rightmost 1-bit. */
7efbc3b7 1804 bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
1356dbd1
WT
1805 &vlans, &dup_mirrors,
1806 &out, &snaplen, &out_vlan);
ec7ceaed
EJ
1807 ovs_assert(has_mirror);
1808
1356dbd1 1809
faa624b4
BP
1810 /* If this mirror selects on the basis of VLAN, and it does not select
1811 * 'vlan', then discard this mirror and go on to the next one. */
ec7ceaed 1812 if (vlans) {
49a73e0c 1813 ctx->wc->masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
9583bc14 1814 }
7efbc3b7 1815 if (vlans && !bitmap_is_set(vlans, vlan)) {
9583bc14
EJ
1816 mirrors = zero_rightmost_1bit(mirrors);
1817 continue;
1818 }
1819
faa624b4
BP
1820 /* Record the mirror, and the mirrors that output to the same
1821 * destination, so that we don't mirror to them again. This must be
1822 * done now to ensure that output_normal(), below, doesn't recursively
1823 * output to the same mirrors. */
3d6151f3 1824 ctx->mirrors |= dup_mirrors;
1356dbd1 1825 ctx->mirror_snaplen = snaplen;
faa624b4
BP
1826
1827 /* Send the packet to the mirror. */
ec7ceaed 1828 if (out) {
84f0f298
RW
1829 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1830 struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
46c88433
EJ
1831 if (out_xbundle) {
1832 output_normal(ctx, out_xbundle, vlan);
1833 }
ec7ceaed 1834 } else if (vlan != out_vlan
7efbc3b7 1835 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
46c88433 1836 struct xbundle *xbundle;
9583bc14 1837
46c88433
EJ
1838 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
1839 if (xbundle_includes_vlan(xbundle, out_vlan)
1840 && !xbundle_mirror_out(xbridge, xbundle)) {
1841 output_normal(ctx, xbundle, out_vlan);
9583bc14
EJ
1842 }
1843 }
1844 }
faa624b4
BP
1845
1846 /* output_normal() could have recursively output (to different
1847 * mirrors), so make sure that we don't send duplicates. */
1848 mirrors &= ~ctx->mirrors;
1356dbd1 1849 ctx->mirror_snaplen = 0;
9583bc14
EJ
1850 }
1851}
1852
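/* Sketch of the bit-mask walk used above (illustrative, not part of the
 * original file): each mirror is one bit in a mirror_mask_t, so the loop
 * repeatedly inspects the rightmost 1-bit with raw_ctz() and clears it with
 * zero_rightmost_1bit() until no candidates remain; a mask of 0b0101, for
 * example, visits mirror 0 and then mirror 2. */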
7efbc3b7
BP
1853static void
1854mirror_ingress_packet(struct xlate_ctx *ctx)
1855{
1856 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
7efbc3b7 1857 struct xbundle *xbundle = lookup_input_bundle(
2d9b49dd 1858 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
7efbc3b7
BP
1859 if (xbundle) {
1860 mirror_packet(ctx, xbundle,
1861 xbundle_mirror_src(ctx->xbridge, xbundle));
1862 }
1863 }
1864}
1865
9583bc14 1866/* Given 'vid', the VID obtained from the 802.1Q header that was received as
46c88433 1867 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
9583bc14
EJ
1868 * the bundle on which the packet was received, returns the VLAN to which the
1869 * packet belongs.
1870 *
1871 * Both 'vid' and the return value are in the range 0...4095. */
1872static uint16_t
46c88433 1873input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
9583bc14 1874{
46c88433 1875 switch (in_xbundle->vlan_mode) {
9583bc14 1876 case PORT_VLAN_ACCESS:
46c88433 1877 return in_xbundle->vlan;
9583bc14
EJ
1878 break;
1879
1880 case PORT_VLAN_TRUNK:
1881 return vid;
1882
1883 case PORT_VLAN_NATIVE_UNTAGGED:
1884 case PORT_VLAN_NATIVE_TAGGED:
46c88433 1885 return vid ? vid : in_xbundle->vlan;
9583bc14
EJ
1886
1887 default:
428b2edd 1888 OVS_NOT_REACHED();
9583bc14
EJ
1889 }
1890}
1891
46c88433 1892/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2d9b49dd 1893 * If so, returns true. Otherwise, returns false.
9583bc14
EJ
1894 *
1895 * 'vid' should be the VID obtained from the 802.1Q header that was received as
1896 * part of a packet (specify 0 if there was no 802.1Q header), in the range
1897 * 0...4095. */
1898static bool
2d9b49dd
BP
1899input_vid_is_valid(const struct xlate_ctx *ctx,
1900 uint16_t vid, struct xbundle *in_xbundle)
9583bc14
EJ
1901{
1902 /* Allow any VID on the OFPP_NONE port. */
46c88433 1903 if (in_xbundle == &ofpp_none_bundle) {
9583bc14
EJ
1904 return true;
1905 }
1906
46c88433 1907 switch (in_xbundle->vlan_mode) {
9583bc14
EJ
1908 case PORT_VLAN_ACCESS:
1909 if (vid) {
2d9b49dd
BP
1910 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
1911 "packet received on port %s configured as VLAN "
1912 "%"PRIu16" access port", vid, in_xbundle->name,
1913 in_xbundle->vlan);
9583bc14
EJ
1914 return false;
1915 }
1916 return true;
1917
1918 case PORT_VLAN_NATIVE_UNTAGGED:
1919 case PORT_VLAN_NATIVE_TAGGED:
1920 if (!vid) {
1921 /* Port must always carry its native VLAN. */
1922 return true;
1923 }
1924 /* Fall through. */
1925 case PORT_VLAN_TRUNK:
46c88433 1926 if (!xbundle_includes_vlan(in_xbundle, vid)) {
2d9b49dd
BP
1927 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
1928 "received on port %s not configured for "
1929 "trunking VLAN %"PRIu16,
1930 vid, in_xbundle->name, vid);
9583bc14
EJ
1931 return false;
1932 }
1933 return true;
1934
1935 default:
428b2edd 1936 OVS_NOT_REACHED();
9583bc14
EJ
1937 }
1938
1939}
1940
1941/* Given 'vlan', the VLAN that a packet belongs to, and
46c88433 1942 * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
9583bc14
EJ
1943 * that should be included in the 802.1Q header. (If the return value is 0,
1944 * then the 802.1Q header should only be included in the packet if there is a
1945 * nonzero PCP.)
1946 *
1947 * Both 'vlan' and the return value are in the range 0...4095. */
1948static uint16_t
46c88433 1949output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
9583bc14 1950{
46c88433 1951 switch (out_xbundle->vlan_mode) {
9583bc14
EJ
1952 case PORT_VLAN_ACCESS:
1953 return 0;
1954
1955 case PORT_VLAN_TRUNK:
1956 case PORT_VLAN_NATIVE_TAGGED:
1957 return vlan;
1958
1959 case PORT_VLAN_NATIVE_UNTAGGED:
46c88433 1960 return vlan == out_xbundle->vlan ? 0 : vlan;
9583bc14
EJ
1961
1962 default:
428b2edd 1963 OVS_NOT_REACHED();
9583bc14
EJ
1964 }
1965}
1966
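/* Worked example for the two mappings above (illustrative): on a bundle in
 * PORT_VLAN_NATIVE_UNTAGGED mode with native VLAN 10, an untagged ingress
 * packet (vid 0) is placed in VLAN 10 by input_vid_to_vlan(), and
 * output_vlan_to_vid() returns 0 for VLAN 10 on egress, so the packet
 * leaves untagged again; packets in any other VLAN keep their tag in both
 * directions. */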
1967static void
46c88433 1968output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
9583bc14
EJ
1969 uint16_t vlan)
1970{
33bf9176 1971 ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
9583bc14
EJ
1972 uint16_t vid;
1973 ovs_be16 tci, old_tci;
46c88433 1974 struct xport *xport;
e93ef1c7
JR
1975 struct xlate_bond_recirc xr;
1976 bool use_recirc = false;
9583bc14 1977
46c88433 1978 vid = output_vlan_to_vid(out_xbundle, vlan);
417e7e66 1979 if (ovs_list_is_empty(&out_xbundle->xports)) {
46c88433
EJ
1980 /* Partially configured bundle with no slaves. Drop the packet. */
1981 return;
1982 } else if (!out_xbundle->bond) {
417e7e66 1983 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
46c88433 1984 bundle_node);
9583bc14 1985 } else {
84f0f298 1986 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
49a73e0c 1987 struct flow_wildcards *wc = ctx->wc;
84f0f298 1988 struct ofport_dpif *ofport;
adcf00ba 1989
82f9f1f5 1990 if (ctx->xbridge->support.odp.recirc
6b95d23c 1991 && bond_may_recirc(out_xbundle->bond)) {
82f9f1f5
AZ
1992 /* To avoid unnecessary locking, bond_may_recirc() is first
1993 * called outside of the 'rwlock'. After acquiring the lock,
1994 * bond_update_post_recirc_rules() will check again to make
1995 * sure bond configuration has not been changed.
1996 *
1997 * In case recirculation is not actually in use, 'xr.recirc_id'
1998 * will be set to '0', Since a valid 'recirc_id' can
1999 * not be zero. */
2000 bond_update_post_recirc_rules(out_xbundle->bond,
2001 &xr.recirc_id,
2002 &xr.hash_basis);
2003 if (xr.recirc_id) {
2004 /* Use recirculation instead of output. */
2005 use_recirc = true;
e93ef1c7 2006 xr.hash_alg = OVS_HASH_ALG_L4;
54ecb5a2
AZ
2007 /* Recirculation does not require unmasking hash fields. */
2008 wc = NULL;
adcf00ba
AZ
2009 }
2010 }
46c88433 2011
54ecb5a2
AZ
2012 ofport = bond_choose_output_slave(out_xbundle->bond,
2013 &ctx->xin->flow, wc, vid);
84f0f298 2014 xport = xport_lookup(xcfg, ofport);
46c88433
EJ
2015
2016 if (!xport) {
9583bc14
EJ
2017 /* No slaves enabled, so drop packet. */
2018 return;
2019 }
d6fc5f57 2020
e93ef1c7 2021 /* If use_recirc is set, the main thread will handle stats
b256dc52 2022 * accounting for this bond. */
e93ef1c7 2023 if (!use_recirc) {
b256dc52
JS
2024 if (ctx->xin->resubmit_stats) {
2025 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2026 ctx->xin->resubmit_stats->n_bytes);
2027 }
2028 if (ctx->xin->xcache) {
2029 struct xc_entry *entry;
2030 struct flow *flow;
2031
2032 flow = &ctx->xin->flow;
2033 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
901a517e
JR
2034 entry->bond.bond = bond_ref(out_xbundle->bond);
2035 entry->bond.flow = xmemdup(flow, sizeof *flow);
2036 entry->bond.vid = vid;
b256dc52 2037 }
d6fc5f57 2038 }
9583bc14
EJ
2039 }
2040
33bf9176 2041 old_tci = *flow_tci;
9583bc14 2042 tci = htons(vid);
46c88433 2043 if (tci || out_xbundle->use_priority_tags) {
33bf9176 2044 tci |= *flow_tci & htons(VLAN_PCP_MASK);
9583bc14
EJ
2045 if (tci) {
2046 tci |= htons(VLAN_CFI);
2047 }
2048 }
33bf9176 2049 *flow_tci = tci;
9583bc14 2050
e93ef1c7 2051 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL);
33bf9176 2052 *flow_tci = old_tci;
9583bc14
EJ
2053}
2054
2055/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2056 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2057 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2058static bool
2059is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2060{
2061 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2062 return false;
2063 }
2064
2065 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2066 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2067 return false;
2068 }
2069
2070 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2071 if (flow->nw_proto == ARP_OP_REPLY) {
2072 return true;
2073 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2074 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2075 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2076
2077 return flow->nw_src == flow->nw_dst;
2078 } else {
2079 return false;
2080 }
2081}
2082
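/* Example of what the check above accepts (illustrative): any broadcast ARP
 * reply, or a broadcast ARP request whose sender and target protocol
 * addresses match, e.g. "who-has 192.0.2.5 tell 192.0.2.5" sent to
 * ff:ff:ff:ff:ff:ff after a VM migration (192.0.2.5 is a documentation
 * address used only for this example). */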
ff69c24a
FL
2083/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2084 * dropped. Returns true if they may be forwarded, false if they should be
2085 * dropped.
2086 *
2087 * 'in_port' must be the xport that corresponds to flow->in_port.
2088 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2089 *
2090 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2091 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
2092 * checked by input_vid_is_valid().
2096 */
2097static bool
2098is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2099 uint16_t vlan)
2100{
2101 struct xbundle *in_xbundle = in_port->xbundle;
2102 const struct xbridge *xbridge = ctx->xbridge;
2103 struct flow *flow = &ctx->xin->flow;
2104
2105 /* Drop frames for reserved multicast addresses
2106 * only if forward_bpdu option is absent. */
2107 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2d9b49dd
BP
2108 xlate_report(ctx, OFT_DETAIL,
2109 "packet has reserved destination MAC, dropping");
ff69c24a
FL
2110 return false;
2111 }
2112
2113 if (in_xbundle->bond) {
2114 struct mac_entry *mac;
2115
2116 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2117 flow->dl_dst)) {
2118 case BV_ACCEPT:
2119 break;
2120
2121 case BV_DROP:
2d9b49dd
BP
2122 xlate_report(ctx, OFT_DETAIL,
2123 "bonding refused admissibility, dropping");
ff69c24a
FL
2124 return false;
2125
2126 case BV_DROP_IF_MOVED:
2127 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2128 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
9d078ec2
BP
2129 if (mac
2130 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
49a73e0c 2131 && (!is_gratuitous_arp(flow, ctx->wc)
9d078ec2 2132 || mac_entry_is_grat_arp_locked(mac))) {
ff69c24a 2133 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2d9b49dd
BP
2134 xlate_report(ctx, OFT_DETAIL,
2135 "SLB bond thinks this packet looped back, "
ff69c24a
FL
2136 "dropping");
2137 return false;
2138 }
2139 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2140 break;
2141 }
2142 }
2143
2144 return true;
2145}
2146
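/* Note on the helper below (added for clarity): update_learning_table__()
 * returns true when nothing was learned, either because the packet arrived
 * on the OFPP_NONE pseudo-bundle or because mac_learning_update() reported
 * no change; update_learning_table() uses that inverted result to log only
 * the cases where a new MAC/port binding was actually installed. */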
2d9b49dd
BP
2147static bool
2148update_learning_table__(const struct xbridge *xbridge,
2149 struct xbundle *in_xbundle, struct eth_addr dl_src,
2150 int vlan, bool is_grat_arp)
2151{
2152 return (in_xbundle == &ofpp_none_bundle
2153 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2154 is_grat_arp,
2155 in_xbundle->bond != NULL,
2156 in_xbundle->ofbundle));
2157}
2158
ee047520 2159static void
2d9b49dd 2160update_learning_table(const struct xlate_ctx *ctx,
064799a1
JR
2161 struct xbundle *in_xbundle, struct eth_addr dl_src,
2162 int vlan, bool is_grat_arp)
ee047520 2163{
2d9b49dd
BP
2164 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2165 is_grat_arp)) {
2166 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2167 "on port %s in VLAN %d",
2168 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
ee047520 2169 }
9583bc14
EJ
2170}
2171
86e2dcdd
FL
2172/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2173 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2174static void
2d9b49dd 2175update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
06994f87
TLSC
2176 const struct flow *flow,
2177 struct mcast_snooping *ms, int vlan,
2178 struct xbundle *in_xbundle,
2179 const struct dp_packet *packet)
86e2dcdd
FL
2180 OVS_REQ_WRLOCK(ms->rwlock)
2181{
46445c63 2182 const struct igmp_header *igmp;
e3102e42 2183 int count;
46445c63 2184 size_t offset;
06994f87 2185 ovs_be32 ip4 = flow->igmp_group_ip4;
86e2dcdd 2186
46445c63
EC
2187 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2188 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2189 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2190 xlate_report_debug(ctx, OFT_DETAIL,
2191 "multicast snooping received bad IGMP "
2192 "checksum on port %s in VLAN %d",
2193 in_xbundle->name, vlan);
46445c63
EC
2194 return;
2195 }
2196
86e2dcdd
FL
2197 switch (ntohs(flow->tp_src)) {
2198 case IGMP_HOST_MEMBERSHIP_REPORT:
2199 case IGMPV2_HOST_MEMBERSHIP_REPORT:
964a4d5f 2200 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2201 xlate_report_debug(ctx, OFT_DETAIL,
2202 "multicast snooping learned that "
2203 IP_FMT" is on port %s in VLAN %d",
2204 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2205 }
2206 break;
2207 case IGMP_HOST_LEAVE_MESSAGE:
964a4d5f 2208 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2209 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
2210 IP_FMT" is on port %s in VLAN %d",
2211 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2212 }
2213 break;
2214 case IGMP_HOST_MEMBERSHIP_QUERY:
2215 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2d9b49dd
BP
2216 in_xbundle->ofbundle)) {
2217 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2218 "from "IP_FMT" is on port %s in VLAN %d",
2219 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
86e2dcdd
FL
2220 }
2221 break;
e3102e42 2222 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2d9b49dd
BP
2223 count = mcast_snooping_add_report(ms, packet, vlan,
2224 in_xbundle->ofbundle);
2225 if (count) {
2226 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2227 "%d addresses on port %s in VLAN %d",
2228 count, in_xbundle->name, vlan);
e3102e42
TLSC
2229 }
2230 break;
86e2dcdd
FL
2231 }
2232}
2233
06994f87 2234static void
2d9b49dd 2235update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
06994f87
TLSC
2236 const struct flow *flow,
2237 struct mcast_snooping *ms, int vlan,
2238 struct xbundle *in_xbundle,
2239 const struct dp_packet *packet)
2240 OVS_REQ_WRLOCK(ms->rwlock)
2241{
46445c63 2242 const struct mld_header *mld;
06994f87 2243 int count;
46445c63
EC
2244 size_t offset;
2245
2246 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2247 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2248
2249 if (!mld ||
2250 packet_csum_upperlayer6(dp_packet_l3(packet),
2251 mld, IPPROTO_ICMPV6,
2252 dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2253 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2254 "bad MLD checksum on port %s in VLAN %d",
2255 in_xbundle->name, vlan);
46445c63
EC
2256 return;
2257 }
06994f87
TLSC
2258
2259 switch (ntohs(flow->tp_src)) {
2260 case MLD_QUERY:
2261 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2262 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2263 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2264 "port %s in VLAN %d", in_xbundle->name, vlan);
06994f87
TLSC
2265 }
2266 break;
2267 case MLD_REPORT:
2268 case MLD_DONE:
2269 case MLD2_REPORT:
2270 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2271 if (count) {
2d9b49dd
BP
2272 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2273 "%d addresses on port %s in VLAN %d",
2274 count, in_xbundle->name, vlan);
06994f87
TLSC
2275 }
2276 break;
2277 }
2278}
2279
86e2dcdd
FL
2280/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2281 * was received on 'in_xbundle' in 'vlan'. */
2282static void
2d9b49dd 2283update_mcast_snooping_table(const struct xlate_ctx *ctx,
86e2dcdd 2284 const struct flow *flow, int vlan,
e3102e42
TLSC
2285 struct xbundle *in_xbundle,
2286 const struct dp_packet *packet)
86e2dcdd 2287{
2d9b49dd 2288 struct mcast_snooping *ms = ctx->xbridge->ms;
86e2dcdd
FL
2289 struct xlate_cfg *xcfg;
2290 struct xbundle *mcast_xbundle;
f4ae6e23 2291 struct mcast_port_bundle *fport;
86e2dcdd
FL
2292
2293 /* Don't learn the OFPP_NONE port. */
2294 if (in_xbundle == &ofpp_none_bundle) {
2295 return;
2296 }
2297
2298 /* Don't learn from flood ports */
2299 mcast_xbundle = NULL;
2300 ovs_rwlock_wrlock(&ms->rwlock);
2301 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 2302 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
2303 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2304 if (mcast_xbundle == in_xbundle) {
2305 break;
2306 }
2307 }
2308
2309 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
06994f87 2310 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2d9b49dd 2311 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
06994f87
TLSC
2312 in_xbundle, packet);
2313 } else {
2d9b49dd 2314 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
06994f87
TLSC
2315 in_xbundle, packet);
2316 }
86e2dcdd
FL
2317 }
2318 ovs_rwlock_unlock(&ms->rwlock);
2319}
2320
 2321/* Send the packet to the ports on which the multicast group was learned. */
2322static void
2323xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2324 struct mcast_snooping *ms OVS_UNUSED,
2325 struct mcast_group *grp,
2326 struct xbundle *in_xbundle, uint16_t vlan)
2327 OVS_REQ_RDLOCK(ms->rwlock)
2328{
2329 struct xlate_cfg *xcfg;
2330 struct mcast_group_bundle *b;
2331 struct xbundle *mcast_xbundle;
2332
2333 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2334 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2335 mcast_xbundle = xbundle_lookup(xcfg, b->port);
2336 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2337 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
86e2dcdd
FL
2338 output_normal(ctx, mcast_xbundle, vlan);
2339 } else if (!mcast_xbundle) {
2d9b49dd
BP
2340 xlate_report(ctx, OFT_WARN,
2341 "mcast group port is unknown, dropping");
86e2dcdd 2342 } else {
2d9b49dd
BP
2343 xlate_report(ctx, OFT_DETAIL,
2344 "mcast group port is input port, dropping");
86e2dcdd
FL
2345 }
2346 }
2347}
2348
2349/* send the packet to ports connected to multicast routers */
2350static void
2351xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2352 struct mcast_snooping *ms,
2353 struct xbundle *in_xbundle, uint16_t vlan)
2354 OVS_REQ_RDLOCK(ms->rwlock)
2355{
2356 struct xlate_cfg *xcfg;
2357 struct mcast_mrouter_bundle *mrouter;
2358 struct xbundle *mcast_xbundle;
2359
2360 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2361 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2362 mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
94a881c1
RD
2363 if (mcast_xbundle && mcast_xbundle != in_xbundle
2364 && mrouter->vlan == vlan) {
2d9b49dd 2365 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
86e2dcdd
FL
2366 output_normal(ctx, mcast_xbundle, vlan);
2367 } else if (!mcast_xbundle) {
2d9b49dd
BP
2368 xlate_report(ctx, OFT_WARN,
2369 "mcast router port is unknown, dropping");
94a881c1 2370 } else if (mrouter->vlan != vlan) {
2d9b49dd
BP
2371 xlate_report(ctx, OFT_DETAIL,
2372 "mcast router is on another vlan, dropping");
86e2dcdd 2373 } else {
2d9b49dd
BP
2374 xlate_report(ctx, OFT_DETAIL,
2375 "mcast router port is input port, dropping");
86e2dcdd
FL
2376 }
2377 }
2378}
2379
2380/* send the packet to ports flagged to be flooded */
2381static void
2382xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2383 struct mcast_snooping *ms,
2384 struct xbundle *in_xbundle, uint16_t vlan)
2385 OVS_REQ_RDLOCK(ms->rwlock)
2386{
2387 struct xlate_cfg *xcfg;
f4ae6e23 2388 struct mcast_port_bundle *fport;
86e2dcdd
FL
2389 struct xbundle *mcast_xbundle;
2390
2391 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 2392 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
2393 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2394 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2395 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
86e2dcdd
FL
2396 output_normal(ctx, mcast_xbundle, vlan);
2397 } else if (!mcast_xbundle) {
2d9b49dd
BP
2398 xlate_report(ctx, OFT_WARN,
2399 "mcast flood port is unknown, dropping");
86e2dcdd 2400 } else {
2d9b49dd
BP
2401 xlate_report(ctx, OFT_DETAIL,
2402 "mcast flood port is input port, dropping");
86e2dcdd
FL
2403 }
2404 }
2405}
2406
8e04a33f
FL
2407/* forward the Reports to configured ports */
2408static void
2409xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2410 struct mcast_snooping *ms,
2411 struct xbundle *in_xbundle, uint16_t vlan)
2412 OVS_REQ_RDLOCK(ms->rwlock)
2413{
2414 struct xlate_cfg *xcfg;
2415 struct mcast_port_bundle *rport;
2416 struct xbundle *mcast_xbundle;
2417
2418 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2419 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2420 mcast_xbundle = xbundle_lookup(xcfg, rport->port);
2421 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd
BP
2422 xlate_report(ctx, OFT_DETAIL,
2423 "forwarding report to mcast flagged port");
8e04a33f
FL
2424 output_normal(ctx, mcast_xbundle, vlan);
2425 } else if (!mcast_xbundle) {
2d9b49dd
BP
2426 xlate_report(ctx, OFT_WARN,
2427 "mcast port is unknown, dropping the report");
8e04a33f 2428 } else {
2d9b49dd
BP
2429 xlate_report(ctx, OFT_DETAIL,
2430 "mcast port is input port, dropping the Report");
8e04a33f
FL
2431 }
2432 }
2433}
2434
682800a4
FL
2435static void
2436xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2437 uint16_t vlan)
2438{
2439 struct xbundle *xbundle;
2440
2441 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2442 if (xbundle != in_xbundle
2443 && xbundle_includes_vlan(xbundle, vlan)
2444 && xbundle->floodable
2445 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2446 output_normal(ctx, xbundle, vlan);
2447 }
2448 }
2031ef97 2449 ctx->nf_output_iface = NF_OUT_FLOOD;
682800a4
FL
2450}
2451
a75636c8
BP
2452static bool
2453is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2454{
2455 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2456 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2457 return ip_is_local_multicast(flow->nw_dst);
2458 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2459 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2460 return ipv6_is_all_hosts(&flow->ipv6_dst);
2461 } else {
2462 return false;
2463 }
2464}
2465
9583bc14
EJ
2466static void
2467xlate_normal(struct xlate_ctx *ctx)
2468{
49a73e0c 2469 struct flow_wildcards *wc = ctx->wc;
33bf9176 2470 struct flow *flow = &ctx->xin->flow;
46c88433
EJ
2471 struct xbundle *in_xbundle;
2472 struct xport *in_port;
9583bc14 2473 struct mac_entry *mac;
d6d5bbc9 2474 void *mac_port;
9583bc14
EJ
2475 uint16_t vlan;
2476 uint16_t vid;
2477
33bf9176
BP
2478 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2479 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1dd35f8a 2480 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
9583bc14 2481
2d9b49dd 2482 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
46c88433 2483 if (!in_xbundle) {
2d9b49dd 2484 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
9583bc14
EJ
2485 return;
2486 }
2487
2488 /* Drop malformed frames. */
33bf9176
BP
2489 if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
2490 !(flow->vlan_tci & htons(VLAN_CFI))) {
9583bc14 2491 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2492 xlate_report_error(ctx, "dropping packet with partial "
2493 "VLAN tag received on port %s",
2494 in_xbundle->name);
9583bc14 2495 }
2d9b49dd 2496 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
9583bc14
EJ
2497 return;
2498 }
2499
2500 /* Drop frames on bundles reserved for mirroring. */
46c88433 2501 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
9583bc14 2502 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2503 xlate_report_error(ctx, "dropping packet received on port %s, "
2504 "which is reserved exclusively for mirroring",
2505 in_xbundle->name);
9583bc14 2506 }
2d9b49dd
BP
2507 xlate_report(ctx, OFT_WARN,
2508 "input port is mirror output port, dropping");
9583bc14
EJ
2509 return;
2510 }
2511
2512 /* Check VLAN. */
33bf9176 2513 vid = vlan_tci_to_vid(flow->vlan_tci);
2d9b49dd
BP
2514 if (!input_vid_is_valid(ctx, vid, in_xbundle)) {
2515 xlate_report(ctx, OFT_WARN,
2516 "disallowed VLAN VID for this input port, dropping");
9583bc14
EJ
2517 return;
2518 }
46c88433 2519 vlan = input_vid_to_vlan(in_xbundle, vid);
9583bc14
EJ
2520
2521 /* Check other admissibility requirements. */
2522 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2523 return;
2524 }
2525
2526 /* Learn source MAC. */
064799a1 2527 bool is_grat_arp = is_gratuitous_arp(flow, wc);
df70a773 2528 if (ctx->xin->allow_side_effects) {
2d9b49dd 2529 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
064799a1 2530 is_grat_arp);
9583bc14 2531 }
064799a1 2532 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
b256dc52
JS
2533 struct xc_entry *entry;
2534
064799a1 2535 /* Save just enough info to update mac learning table later. */
b256dc52 2536 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
901a517e
JR
2537 entry->normal.ofproto = ctx->xbridge->ofproto;
2538 entry->normal.in_port = flow->in_port.ofp_port;
2539 entry->normal.dl_src = flow->dl_src;
2540 entry->normal.vlan = vlan;
2541 entry->normal.is_gratuitous_arp = is_grat_arp;
b256dc52 2542 }
9583bc14
EJ
2543
2544 /* Determine output bundle. */
86e2dcdd
FL
2545 if (mcast_snooping_enabled(ctx->xbridge->ms)
2546 && !eth_addr_is_broadcast(flow->dl_dst)
2547 && eth_addr_is_multicast(flow->dl_dst)
06994f87 2548 && is_ip_any(flow)) {
86e2dcdd 2549 struct mcast_snooping *ms = ctx->xbridge->ms;
06994f87 2550 struct mcast_group *grp = NULL;
86e2dcdd 2551
a75636c8
BP
2552 if (is_igmp(flow, wc)) {
2553 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
d29f137b
TLSC
2554 if (mcast_snooping_is_membership(flow->tp_src) ||
2555 mcast_snooping_is_query(flow->tp_src)) {
df70a773 2556 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 2557 update_mcast_snooping_table(ctx, flow, vlan,
e3102e42 2558 in_xbundle, ctx->xin->packet);
d29f137b
TLSC
2559 }
2560 /*
2561 * IGMP packets need to take the slow path, in order to be
 2562 * processed for mdb updates. Otherwise, mdb entries would expire
 2563 * even though hosts keep sending reports.
2564 */
2565 ctx->xout->slow |= SLOW_ACTION;
86e2dcdd 2566 }
d6d5bbc9 2567
86e2dcdd
FL
2568 if (mcast_snooping_is_membership(flow->tp_src)) {
2569 ovs_rwlock_rdlock(&ms->rwlock);
2570 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
8e04a33f
FL
2571 /* RFC4541: section 2.1.1, item 1: A snooping switch should
2572 * forward IGMP Membership Reports only to those ports where
2573 * multicast routers are attached. Alternatively stated: a
2574 * snooping switch should not forward IGMP Membership Reports
2575 * to ports on which only hosts are attached.
2576 * An administrative control may be provided to override this
2577 * restriction, allowing the report messages to be flooded to
2578 * other ports. */
2579 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
86e2dcdd
FL
2580 ovs_rwlock_unlock(&ms->rwlock);
2581 } else {
2d9b49dd 2582 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
86e2dcdd
FL
2583 xlate_normal_flood(ctx, in_xbundle, vlan);
2584 }
2585 return;
a75636c8 2586 } else if (is_mld(flow, wc)) {
06994f87 2587 ctx->xout->slow |= SLOW_ACTION;
df70a773 2588 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 2589 update_mcast_snooping_table(ctx, flow, vlan,
06994f87
TLSC
2590 in_xbundle, ctx->xin->packet);
2591 }
a75636c8 2592 if (is_mld_report(flow, wc)) {
06994f87
TLSC
2593 ovs_rwlock_rdlock(&ms->rwlock);
2594 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
2595 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
2596 ovs_rwlock_unlock(&ms->rwlock);
2597 } else {
2d9b49dd 2598 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
06994f87
TLSC
2599 xlate_normal_flood(ctx, in_xbundle, vlan);
2600 }
86e2dcdd 2601 } else {
a75636c8 2602 if (is_ip_local_multicast(flow, wc)) {
86e2dcdd
FL
2603 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
2604 * address in the 224.0.0.x range which are not IGMP must
2605 * be forwarded on all ports */
2d9b49dd
BP
2606 xlate_report(ctx, OFT_DETAIL,
2607 "RFC4541: section 2.1.2, item 2, flooding");
86e2dcdd
FL
2608 xlate_normal_flood(ctx, in_xbundle, vlan);
2609 return;
2610 }
2611 }
2612
2613 /* forwarding to group base ports */
2614 ovs_rwlock_rdlock(&ms->rwlock);
06994f87
TLSC
2615 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2616 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
2617 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2618 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
2619 }
86e2dcdd
FL
2620 if (grp) {
2621 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
2622 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
2623 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
9583bc14 2624 } else {
86e2dcdd 2625 if (mcast_snooping_flood_unreg(ms)) {
2d9b49dd
BP
2626 xlate_report(ctx, OFT_DETAIL,
2627 "unregistered multicast, flooding");
86e2dcdd
FL
2628 xlate_normal_flood(ctx, in_xbundle, vlan);
2629 } else {
2630 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
2631 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
2632 }
9583bc14 2633 }
86e2dcdd 2634 ovs_rwlock_unlock(&ms->rwlock);
9583bc14 2635 } else {
86e2dcdd
FL
2636 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
2637 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
9d078ec2 2638 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
86e2dcdd
FL
2639 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
2640
2641 if (mac_port) {
2642 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2643 struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
2644 if (mac_xbundle && mac_xbundle != in_xbundle) {
2d9b49dd 2645 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
86e2dcdd
FL
2646 output_normal(ctx, mac_xbundle, vlan);
2647 } else if (!mac_xbundle) {
2d9b49dd
BP
2648 xlate_report(ctx, OFT_WARN,
2649 "learned port is unknown, dropping");
86e2dcdd 2650 } else {
2d9b49dd
BP
2651 xlate_report(ctx, OFT_DETAIL,
2652 "learned port is input port, dropping");
86e2dcdd
FL
2653 }
2654 } else {
2d9b49dd
BP
2655 xlate_report(ctx, OFT_DETAIL,
2656 "no learned MAC for destination, flooding");
86e2dcdd
FL
2657 xlate_normal_flood(ctx, in_xbundle, vlan);
2658 }
9583bc14
EJ
2659 }
2660}
2661
a6092018
BP
2662/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
2663 * 'probability' is the number of packets out of UINT32_MAX to sample. The
2664 * 'cookie' (of length 'cookie_size' bytes) is passed back in the callback for
2665 * each sampled packet. 'tunnel_out_port', if not ODPP_NONE, is added as the
2666 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions', an
f69f713b
BY
 2667 * OVS_USERSPACE_ATTR_ACTIONS attribute is added. If 'emit_set_tunnel' is in
 2668 * effect in the caller, a sample(sampling_port=1) action translates into the
 2669 * datapath actions set(tunnel(...)), sample(...), which is used for sampling
 2670 * egress tunnel information.
9583bc14
EJ
2671 */
2672static size_t
a6092018 2673compose_sample_action(struct xlate_ctx *ctx,
9583bc14
EJ
2674 const uint32_t probability,
2675 const union user_action_cookie *cookie,
8b7ea2d4 2676 const size_t cookie_size,
7321bda3
NM
2677 const odp_port_t tunnel_out_port,
2678 bool include_actions)
9583bc14 2679{
b97f2c3a
BY
2680 if (probability == 0) {
2681 /* No need to generate sampling or the inner action. */
2682 return 0;
2683 }
2684
72471622
BY
2685 /* No need to generate sample action for 100% sampling rate. */
2686 bool is_sample = probability < UINT32_MAX;
2687 size_t sample_offset, actions_offset;
2688 if (is_sample) {
2689 sample_offset = nl_msg_start_nested(ctx->odp_actions,
2690 OVS_ACTION_ATTR_SAMPLE);
2691 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
2692 probability);
2693 actions_offset = nl_msg_start_nested(ctx->odp_actions,
2694 OVS_SAMPLE_ATTR_ACTIONS);
2695 }
9583bc14 2696
a6092018
BP
2697 odp_port_t odp_port = ofp_port_to_odp_port(
2698 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
2699 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
2700 flow_hash_5tuple(&ctx->xin->flow, 0));
2701 int cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
2702 tunnel_out_port,
2703 include_actions,
2704 ctx->odp_actions);
89a8a7f0 2705
72471622
BY
2706 if (is_sample) {
2707 nl_msg_end_nested(ctx->odp_actions, actions_offset);
2708 nl_msg_end_nested(ctx->odp_actions, sample_offset);
2709 }
9583bc14 2710
9583bc14
EJ
2711 return cookie_offset;
2712}
2713
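/* A minimal sketch (not part of the original file) showing how a
 * "1 packet in N" rate maps onto the 'probability' argument above, which is
 * expressed as packets out of UINT32_MAX.  'n' is a hypothetical rate; the
 * real callers obtain their probabilities from dpif_sflow_get_probability()
 * and dpif_ipfix_get_bridge_exporter_probability(). */
static inline uint32_t
example_one_in_n_probability(uint32_t n)
{
    /* n == 0 disables sampling entirely; n == 1 samples every packet and, per
     * compose_sample_action() above, skips the nested sample() wrapper since
     * the probability is then UINT32_MAX. */
    return n ? UINT32_MAX / n : 0;
}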
a6092018
BP
 2714/* If sFlow is not enabled, returns 0 without doing anything.
2715 *
2716 * If sFlow is enabled, appends a template "sample" action to the ODP actions
2717 * in 'ctx'. This action is a template because some of the information needed
2718 * to fill it out is not available until flow translation is complete. In this
 2719 * case, this function returns an offset, which is always nonzero, to pass
2720 * later to fix_sflow_action() to fill in the rest of the template. */
9583bc14 2721static size_t
a6092018 2722compose_sflow_action(struct xlate_ctx *ctx)
9583bc14 2723{
a6092018
BP
2724 struct dpif_sflow *sflow = ctx->xbridge->sflow;
2725 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2726 return 0;
2727 }
2728
a6092018
BP
2729 union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
2730 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
7321bda3
NM
2731 &cookie, sizeof cookie.sflow, ODPP_NONE,
2732 true);
9583bc14
EJ
2733}
2734
f69f713b
BY
2735/* If flow IPFIX is enabled, make sure IPFIX flow sample action
2736 * at egress point of tunnel port is just in front of corresponding
2737 * output action. If bridge IPFIX is enabled, this appends an IPFIX
2738 * sample action to 'ctx->odp_actions'. */
9583bc14 2739static void
a6092018 2740compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
9583bc14 2741{
a6092018 2742 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
8b7ea2d4 2743 odp_port_t tunnel_out_port = ODPP_NONE;
9583bc14 2744
a6092018 2745 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2746 return;
2747 }
2748
8b7ea2d4
WZ
2749 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
2750 * number. */
2751 if (output_odp_port == ODPP_NONE &&
a6092018 2752 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
8b7ea2d4
WZ
2753 return;
2754 }
2755
f69f713b 2756 /* For output case, output_odp_port is valid. */
8b7ea2d4 2757 if (output_odp_port != ODPP_NONE) {
a6092018 2758 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
8b7ea2d4
WZ
2759 return;
2760 }
2761 /* If tunnel sampling is enabled, put an additional option attribute:
2762 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
2763 */
a6092018
BP
2764 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
2765 dpif_ipfix_get_tunnel_port(ipfix, output_odp_port) ) {
8b7ea2d4
WZ
2766 tunnel_out_port = output_odp_port;
2767 }
2768 }
2769
a6092018
BP
2770 union user_action_cookie cookie = {
2771 .ipfix = {
2772 .type = USER_ACTION_COOKIE_IPFIX,
2773 .output_odp_port = output_odp_port,
2774 }
2775 };
2776 compose_sample_action(ctx,
2777 dpif_ipfix_get_bridge_exporter_probability(ipfix),
7321bda3
NM
2778 &cookie, sizeof cookie.ipfix, tunnel_out_port,
2779 false);
9583bc14
EJ
2780}
2781
a6092018
BP
2782/* Fix "sample" action according to data collected while composing ODP actions,
2783 * as described in compose_sflow_action().
2784 *
 2785 * 'user_cookie_offset' must be the offset returned by compose_sflow_action(). */
9583bc14 2786static void
a6092018 2787fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
9583bc14
EJ
2788{
2789 const struct flow *base = &ctx->base_flow;
2790 union user_action_cookie *cookie;
2791
a6092018 2792 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset,
9583bc14
EJ
2793 sizeof cookie->sflow);
2794 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
2795
a6092018
BP
2796 cookie->type = USER_ACTION_COOKIE_SFLOW;
2797 cookie->sflow.vlan_tci = base->vlan_tci;
2798
2799 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
2800 * port information") for the interpretation of cookie->output. */
2801 switch (ctx->sflow_n_outputs) {
2802 case 0:
2803 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
2804 cookie->sflow.output = 0x40000000 | 256;
2805 break;
2806
2807 case 1:
2808 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
2809 ctx->xbridge->sflow, ctx->sflow_odp_port);
2810 if (cookie->sflow.output) {
2811 break;
2812 }
2813 /* Fall through. */
2814 default:
 2815 /* 0x80000000 means "multiple output ports". */
2816 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
2817 break;
2818 }
9583bc14
EJ
2819}
2820
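/* Worked example for the encoding above (illustrative): a flow output to
 * exactly one port gets that port's ifindex in cookie->sflow.output; a flow
 * output to three ports is encoded as 0x80000000 | 3 ("multiple outputs");
 * a flow that produced no output is reported as 0x40000000 | 256 ("dropped
 * for unknown reason"), per the sFlow v5 "Input/output port information"
 * encoding referenced above. */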
515793d5
BP
2821static bool
2822process_special(struct xlate_ctx *ctx, const struct xport *xport)
db7d4e46 2823{
515793d5 2824 const struct flow *flow = &ctx->xin->flow;
49a73e0c 2825 struct flow_wildcards *wc = ctx->wc;
46c88433 2826 const struct xbridge *xbridge = ctx->xbridge;
515793d5
BP
2827 const struct dp_packet *packet = ctx->xin->packet;
2828 enum slow_path_reason slow;
642dc74d 2829
46c88433 2830 if (!xport) {
515793d5 2831 slow = 0;
46c88433 2832 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
db7d4e46 2833 if (packet) {
46c88433 2834 cfm_process_heartbeat(xport->cfm, packet);
db7d4e46 2835 }
515793d5 2836 slow = SLOW_CFM;
fab52e16 2837 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
db7d4e46 2838 if (packet) {
46c88433 2839 bfd_process_packet(xport->bfd, flow, packet);
60d02c72
AW
2840 /* If POLL received, immediately sends FINAL back. */
2841 if (bfd_should_send_packet(xport->bfd)) {
6d308b28 2842 ofproto_dpif_monitor_port_send_soon(xport->ofport);
60d02c72 2843 }
db7d4e46 2844 }
515793d5 2845 slow = SLOW_BFD;
46c88433 2846 } else if (xport->xbundle && xport->xbundle->lacp
db7d4e46
JP
2847 && flow->dl_type == htons(ETH_TYPE_LACP)) {
2848 if (packet) {
46c88433 2849 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
db7d4e46 2850 }
515793d5 2851 slow = SLOW_LACP;
9efd308e
DV
2852 } else if ((xbridge->stp || xbridge->rstp) &&
2853 stp_should_process_flow(flow, wc)) {
db7d4e46 2854 if (packet) {
f025bcb7
JR
2855 xbridge->stp
2856 ? stp_process_packet(xport, packet)
2857 : rstp_process_packet(xport, packet);
db7d4e46 2858 }
515793d5 2859 slow = SLOW_STP;
19aef6ef 2860 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
0477baa9
DF
2861 if (packet) {
2862 lldp_process_packet(xport->lldp, packet);
2863 }
515793d5 2864 slow = SLOW_LLDP;
db7d4e46 2865 } else {
515793d5
BP
2866 slow = 0;
2867 }
2868
2869 if (slow) {
2870 ctx->xout->slow |= slow;
2871 return true;
2872 } else {
2873 return false;
db7d4e46
JP
2874 }
2875}
2876
a36de779
PS
2877static int
2878tnl_route_lookup_flow(const struct flow *oflow,
a8704b50
PS
2879 struct in6_addr *ip, struct in6_addr *src,
2880 struct xport **out_port)
a36de779
PS
2881{
2882 char out_dev[IFNAMSIZ];
2883 struct xbridge *xbridge;
2884 struct xlate_cfg *xcfg;
c2b878e0
TLSC
2885 struct in6_addr gw;
2886 struct in6_addr dst;
a36de779 2887
c2b878e0 2888 dst = flow_tnl_dst(&oflow->tunnel);
ed52ca57 2889 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
a36de779
PS
2890 return -ENOENT;
2891 }
2892
c2b878e0
TLSC
2893 if (ipv6_addr_is_set(&gw) &&
2894 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
a36de779
PS
2895 *ip = gw;
2896 } else {
c2b878e0 2897 *ip = dst;
a36de779
PS
2898 }
2899
2900 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2901 ovs_assert(xcfg);
2902
2903 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
2904 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
2905 struct xport *port;
2906
2907 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
2908 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
2909 *out_port = port;
2910 return 0;
2911 }
2912 }
2913 }
2914 }
2915 return -ENOENT;
2916}
2917
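/* Illustrative example of the lookup above (the addresses are documentation
 * values, not from the original file): for a tunnel whose remote_ip
 * 198.51.100.7 is reachable through an on-link router 203.0.113.1, *ip is
 * set to the gateway 203.0.113.1 and *out_port to the xport whose netdev
 * matches the routing table's output device; when the remote endpoint is
 * directly attached, *ip stays the tunnel destination itself. */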
2918static int
cdd42eda
JG
2919compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
2920 struct dp_packet *packet)
a36de779 2921{
cdd42eda 2922 struct xbridge *xbridge = out_dev->xbridge;
a36de779
PS
2923 struct ofpact_output output;
2924 struct flow flow;
2925
2926 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
cf62fa4c 2927 flow_extract(packet, &flow);
cdd42eda
JG
2928 flow.in_port.ofp_port = out_dev->ofp_port;
2929 output.port = OFPP_TABLE;
a36de779
PS
2930 output.max_len = 0;
2931
1f4a8933
JR
2932 return ofproto_dpif_execute_actions__(xbridge->ofproto,
2933 ctx->xin->tables_version, &flow,
2934 NULL, &output.ofpact, sizeof output,
2d9b49dd 2935 ctx->depth, ctx->resubmits, packet);
a36de779
PS
2936}
2937
c2b878e0
TLSC
2938static void
2939tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
2940 const struct eth_addr eth_src,
2941 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
2942{
2943 struct dp_packet packet;
2944
2945 dp_packet_init(&packet, 0);
16187903 2946 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
c2b878e0
TLSC
2947 compose_table_xlate(ctx, out_dev, &packet);
2948 dp_packet_uninit(&packet);
2949}
2950
a36de779 2951static void
cdd42eda 2952tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
74ff3298 2953 const struct eth_addr eth_src,
a36de779
PS
2954 ovs_be32 ip_src, ovs_be32 ip_dst)
2955{
cf62fa4c 2956 struct dp_packet packet;
a36de779 2957
cf62fa4c 2958 dp_packet_init(&packet, 0);
eb0b295e
BP
2959 compose_arp(&packet, ARP_OP_REQUEST,
2960 eth_src, eth_addr_zero, true, ip_src, ip_dst);
a36de779 2961
cdd42eda 2962 compose_table_xlate(ctx, out_dev, &packet);
cf62fa4c 2963 dp_packet_uninit(&packet);
a36de779
PS
2964}
2965
2966static int
81de18ec 2967build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
a36de779
PS
2968 const struct flow *flow, odp_port_t tunnel_odp_port)
2969{
4975aa3e 2970 struct netdev_tnl_build_header_params tnl_params;
a36de779
PS
2971 struct ovs_action_push_tnl tnl_push_data;
2972 struct xport *out_dev = NULL;
c2b878e0
TLSC
2973 ovs_be32 s_ip = 0, d_ip = 0;
2974 struct in6_addr s_ip6 = in6addr_any;
2975 struct in6_addr d_ip6 = in6addr_any;
74ff3298
JR
2976 struct eth_addr smac;
2977 struct eth_addr dmac;
a36de779 2978 int err;
c2b878e0
TLSC
2979 char buf_sip6[INET6_ADDRSTRLEN];
2980 char buf_dip6[INET6_ADDRSTRLEN];
a36de779 2981
a8704b50 2982 err = tnl_route_lookup_flow(flow, &d_ip6, &s_ip6, &out_dev);
a36de779 2983 if (err) {
2d9b49dd 2984 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
a36de779
PS
2985 return err;
2986 }
c2b878e0 2987
2d9b49dd 2988 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
c2b878e0
TLSC
2989 ipv6_string_mapped(buf_dip6, &d_ip6),
2990 netdev_get_name(out_dev->netdev));
a36de779
PS
2991
2992 /* Use mac addr of bridge port of the peer. */
74ff3298 2993 err = netdev_get_etheraddr(out_dev->netdev, &smac);
a36de779 2994 if (err) {
2d9b49dd
BP
2995 xlate_report(ctx, OFT_WARN,
2996 "tunnel output device lacks Ethernet address");
a36de779
PS
2997 return err;
2998 }
2999
c2b878e0
TLSC
3000 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3001 if (d_ip) {
a8704b50 3002 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
a36de779
PS
3003 }
3004
c2b878e0 3005 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
a36de779 3006 if (err) {
2d9b49dd
BP
3007 xlate_report(ctx, OFT_DETAIL,
3008 "neighbor cache miss for %s on bridge %s, "
c2b878e0
TLSC
3009 "sending %s request",
3010 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3011 if (d_ip) {
3012 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3013 } else {
3014 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3015 }
a36de779
PS
3016 return err;
3017 }
c2b878e0 3018
a36de779
PS
3019 if (ctx->xin->xcache) {
3020 struct xc_entry *entry;
3021
53902038 3022 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
901a517e
JR
3023 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3024 sizeof entry->tnl_neigh_cache.br_name);
3025 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
a36de779 3026 }
81de18ec 3027
2d9b49dd 3028 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
c2b878e0
TLSC
3029 " to "ETH_ADDR_FMT" %s",
3030 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3031 ETH_ADDR_ARGS(dmac), buf_dip6);
3032
4975aa3e
PS
3033 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3034 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
a36de779
PS
3035 if (err) {
3036 return err;
3037 }
3038 tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
3039 tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
1520ef4f 3040 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
a36de779
PS
3041 return 0;
3042}
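/* Summary (descriptive note, not part of the original source): native
 * tunneling resolves the route and the neighbor entry in userspace and then
 * emits an OVS_ACTION_ATTR_TUNNEL_PUSH that carries the prebuilt header and
 * the odp port of 'out_dev'.  On a neighbor cache miss no push action is
 * emitted; instead an ARP or ND request is composed and translated through
 * the bridge via compose_table_xlate(). */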
3043
704bb0bf
JS
3044static void
3045xlate_commit_actions(struct xlate_ctx *ctx)
3046{
3047 bool use_masked = ctx->xbridge->support.masked_set_action;
3048
3049 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3050 ctx->odp_actions, ctx->wc,
3051 use_masked);
3052}
3053
07659514 3054static void
72fe7578 3055clear_conntrack(struct xlate_ctx *ctx)
07659514 3056{
72fe7578
BP
3057 ctx->conntracked = false;
3058
3059 struct flow *flow = &ctx->xin->flow;
07659514
JS
3060 flow->ct_state = 0;
3061 flow->ct_zone = 0;
8e53fe8c 3062 flow->ct_mark = 0;
72fe7578 3063 flow->ct_label = OVS_U128_ZERO;
daf4d3c1
JR
3064
3065 flow->ct_nw_proto = 0;
3066 flow->ct_tp_src = 0;
3067 flow->ct_tp_dst = 0;
3068 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3069 flow->ct_nw_src = 0;
3070 flow->ct_nw_dst = 0;
 3071 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3072 memset(&flow->ct_ipv6_src, 0, sizeof flow->ct_ipv6_src);
3073 memset(&flow->ct_ipv6_dst, 0, sizeof flow->ct_ipv6_dst);
3074 }
07659514
JS
3075}
3076
58d636ee
BK
3077static bool
3078xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3079{
3080 const struct xport *xport_in;
3081
3082 if (!xport_out) {
3083 return false;
3084 }
3085
3086 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3087
3088 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3089 xport_in->xbundle->protected && xport_out->xbundle->protected);
3090}
3091
9583bc14 3092static void
4e022ec0 3093compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
e93ef1c7 3094 const struct xlate_bond_recirc *xr, bool check_stp)
9583bc14 3095{
46c88433 3096 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
49a73e0c 3097 struct flow_wildcards *wc = ctx->wc;
33bf9176 3098 struct flow *flow = &ctx->xin->flow;
a36de779 3099 struct flow_tnl flow_tnl;
9583bc14 3100 ovs_be16 flow_vlan_tci;
1362e248 3101 uint32_t flow_pkt_mark;
9583bc14 3102 uint8_t flow_nw_tos;
4e022ec0 3103 odp_port_t out_port, odp_port;
a36de779 3104 bool tnl_push_pop_send = false;
ca077186 3105 uint8_t dscp;
9583bc14
EJ
3106
3107 /* If 'struct flow' gets additional metadata, we'll need to zero it out
3108 * before traversing a patch port. */
daf4d3c1 3109 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 37);
a36de779 3110 memset(&flow_tnl, 0, sizeof flow_tnl);
9583bc14 3111
46c88433 3112 if (!xport) {
2d9b49dd 3113 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
9583bc14 3114 return;
46c88433 3115 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
2d9b49dd 3116 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
9583bc14 3117 return;
1356dbd1 3118 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
2d9b49dd
BP
3119 xlate_report(ctx, OFT_WARN,
3120 "Mirror truncate to ODPP_NONE, skipping output");
1356dbd1 3121 return;
58d636ee 3122 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
2d9b49dd
BP
3123 xlate_report(ctx, OFT_WARN,
3124 "Flow is between protected ports, skipping output.");
58d636ee 3125 return;
0d1cee12 3126 } else if (check_stp) {
bbbca389 3127 if (is_stp(&ctx->base_flow)) {
9efd308e
DV
3128 if (!xport_stp_should_forward_bpdu(xport) &&
3129 !xport_rstp_should_manage_bpdu(xport)) {
3130 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3131 xlate_report(ctx, OFT_WARN,
3132 "STP not in listening state, "
3133 "skipping bpdu output");
9efd308e 3134 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3135 xlate_report(ctx, OFT_WARN,
3136 "RSTP not managing BPDU in this state, "
3137 "skipping bpdu output");
9efd308e 3138 }
0d1cee12
K
3139 return;
3140 }
9efd308e
DV
3141 } else if (!xport_stp_forward_state(xport) ||
3142 !xport_rstp_forward_state(xport)) {
3143 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3144 xlate_report(ctx, OFT_WARN,
3145 "STP not in forwarding state, skipping output");
9efd308e 3146 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3147 xlate_report(ctx, OFT_WARN,
3148 "RSTP not in forwarding state, skipping output");
9efd308e 3149 }
0d1cee12
K
3150 return;
3151 }
9583bc14
EJ
3152 }
3153
46c88433
EJ
3154 if (xport->peer) {
3155 const struct xport *peer = xport->peer;
9583bc14 3156 struct flow old_flow = ctx->xin->flow;
8d8ab6c2 3157 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
07659514 3158 bool old_conntrack = ctx->conntracked;
e12ec36b 3159 bool old_was_mpls = ctx->was_mpls;
1f4a8933 3160 ovs_version_t old_version = ctx->xin->tables_version;
1774d762 3161 struct ofpbuf old_stack = ctx->stack;
84cf3c1f 3162 uint8_t new_stack[1024];
ed9c9e3e 3163 struct ofpbuf old_action_set = ctx->action_set;
2d9b49dd 3164 struct ovs_list *old_trace = ctx->xin->trace;
ed9c9e3e 3165 uint64_t actset_stub[1024 / 8];
9583bc14 3166
1774d762 3167 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
ed9c9e3e 3168 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
46c88433 3169 flow->in_port.ofp_port = peer->ofp_port;
33bf9176
BP
3170 flow->metadata = htonll(0);
3171 memset(&flow->tunnel, 0, sizeof flow->tunnel);
07a3cd5c
BP
3172 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
3173 &peer->xbridge->ofproto->up);
8d8ab6c2 3174 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
33bf9176 3175 memset(flow->regs, 0, sizeof flow->regs);
c61f3870 3176 flow->actset_output = OFPP_UNSET;
72fe7578 3177 clear_conntrack(ctx);
2d9b49dd
BP
3178 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE,
3179 "bridge(\"%s\")", peer->xbridge->name);
9583bc14 3180
76f3c260
BP
3181 /* When the patch port points to a different bridge, then the mirrors
3182 * for that bridge clearly apply independently to the packet, so we
3183 * reset the mirror bitmap to zero and then restore it after the packet
3184 * returns.
3185 *
3186 * When the patch port points to the same bridge, this is more of a
3187 * design decision: can mirrors be re-applied to the packet after it
3188 * re-enters the bridge, or should we treat that as doubly mirroring a
3189 * single packet? The former may be cleaner, since it respects the
3190 * model in which a patch port is like a physical cable plugged from
3191 * one switch port to another, but the latter may be less surprising to
3192 * users. We take the latter choice, for now at least. (To use the
3193 * former choice, hard-code 'independent_mirrors' to "true".) */
3194 mirror_mask_t old_mirrors = ctx->mirrors;
3195 bool independent_mirrors = peer->xbridge != ctx->xbridge;
3196 if (independent_mirrors) {
3197 ctx->mirrors = 0;
3198 }
3199 ctx->xbridge = peer->xbridge;
3200
621b8064 3201 /* The bridge is now known so obtain its table version. */
1f4a8933 3202 ctx->xin->tables_version
621b8064
JR
3203 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3204
515793d5 3205 if (!process_special(ctx, peer) && may_receive(peer, ctx)) {
9efd308e 3206 if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
2cd20955
JR
3207 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3208 false);
1d361a81 3209 if (!ctx->freezing) {
8a5fb3b4 3210 xlate_action_set(ctx);
e672ff9b 3211 }
1d361a81 3212 if (ctx->freezing) {
77ab5fd2 3213 finish_freezing(ctx);
ed9c9e3e 3214 }
9583bc14 3215 } else {
9efd308e
DV
3216 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3217 * the learning action look at the packet, then drop it. */
9583bc14 3218 struct flow old_base_flow = ctx->base_flow;
1520ef4f 3219 size_t old_size = ctx->odp_actions->size;
76f3c260 3220 mirror_mask_t old_mirrors2 = ctx->mirrors;
f3d5b473 3221
2cd20955
JR
3222 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3223 false);
76f3c260 3224 ctx->mirrors = old_mirrors2;
9583bc14 3225 ctx->base_flow = old_base_flow;
1520ef4f 3226 ctx->odp_actions->size = old_size;
e672ff9b 3227
1d361a81
BP
3228 /* Undo changes that may have been done for freezing. */
3229 ctx_cancel_freeze(ctx);
9583bc14
EJ
3230 }
3231 }
3232
2d9b49dd 3233 ctx->xin->trace = old_trace;
76f3c260
BP
3234 if (independent_mirrors) {
3235 ctx->mirrors = old_mirrors;
3236 }
9583bc14 3237 ctx->xin->flow = old_flow;
832554e3 3238 ctx->xbridge = xport->xbridge;
ed9c9e3e
JR
3239 ofpbuf_uninit(&ctx->action_set);
3240 ctx->action_set = old_action_set;
1774d762
JR
3241 ofpbuf_uninit(&ctx->stack);
3242 ctx->stack = old_stack;
9583bc14 3243
621b8064 3244 /* Restore calling bridge's lookup version. */
1f4a8933 3245 ctx->xin->tables_version = old_version;
621b8064 3246
8d8ab6c2
JG
3247 /* Since this packet came in on a patch port (from the perspective of
3248 * the peer bridge), it cannot have useful tunnel information. As a
3249 * result, any wildcards generated on that tunnel also cannot be valid.
3250 * The tunnel wildcards must be restored to their original version since
3251 * the peer bridge uses a separate tunnel metadata table and therefore
3252 * any generated wildcards will be garbage in the context of our
3253 * metadata table. */
3254 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3255
e12ec36b
SH
3256 /* The peer bridge popping MPLS should have no effect on the original
3257 * bridge. */
3258 ctx->was_mpls = old_was_mpls;
3259
07659514
JS
3260 /* The peer bridge's conntrack execution should have no effect on the
3261 * original bridge. */
3262 ctx->conntracked = old_conntrack;
3263
f3d5b473
JR
3264 /* The fact that the peer bridge exits (for any reason) does not mean
3265 * that the original bridge should exit. Specifically, if the peer
1d361a81
BP
3266 * bridge freezes translation, the original bridge must continue
3267 * processing with the original, not the frozen packet! */
f3d5b473
JR
3268 ctx->exit = false;
3269
fff1b9c0
JR
3270 /* Peer bridge errors do not propagate back. */
3271 ctx->error = XLATE_OK;
3272
9583bc14 3273 if (ctx->xin->resubmit_stats) {
46c88433
EJ
3274 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
3275 netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
a1aeea86
AW
3276 if (peer->bfd) {
3277 bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
3278 }
9583bc14 3279 }
b256dc52
JS
3280 if (ctx->xin->xcache) {
3281 struct xc_entry *entry;
3282
3283 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
901a517e
JR
3284 entry->dev.tx = netdev_ref(xport->netdev);
3285 entry->dev.rx = netdev_ref(peer->netdev);
3286 entry->dev.bfd = bfd_ref(peer->bfd);
b256dc52 3287 }
9583bc14
EJ
3288 return;
3289 }
3290
33bf9176 3291 flow_vlan_tci = flow->vlan_tci;
1362e248 3292 flow_pkt_mark = flow->pkt_mark;
33bf9176 3293 flow_nw_tos = flow->nw_tos;
9583bc14 3294
16194afd
DDP
3295 if (count_skb_priorities(xport)) {
3296 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3297 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
3298 wc->masks.nw_tos |= IP_DSCP_MASK;
3299 flow->nw_tos &= ~IP_DSCP_MASK;
3300 flow->nw_tos |= dscp;
3301 }
9583bc14
EJ
3302 }
3303
46c88433 3304 if (xport->is_tunnel) {
c2b878e0 3305 struct in6_addr dst;
9583bc14
EJ
3306 /* Save tunnel metadata so that changes made due to
3307 * the Logical (tunnel) Port are not visible for any further
3308 * matches, while explicit set actions on tunnel metadata are.
3309 */
a36de779 3310 flow_tnl = flow->tunnel;
49a73e0c 3311 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
4e022ec0 3312 if (odp_port == ODPP_NONE) {
2d9b49dd 3313 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
9583bc14
EJ
3314 goto out; /* restore flow_nw_tos */
3315 }
c2b878e0
TLSC
3316 dst = flow_tnl_dst(&flow->tunnel);
3317 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
2d9b49dd 3318 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
9583bc14
EJ
3319 goto out; /* restore flow_nw_tos */
3320 }
3321 if (ctx->xin->resubmit_stats) {
46c88433 3322 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
9583bc14 3323 }
b256dc52
JS
3324 if (ctx->xin->xcache) {
3325 struct xc_entry *entry;
3326
3327 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
901a517e 3328 entry->dev.tx = netdev_ref(xport->netdev);
b256dc52 3329 }
9583bc14 3330 out_port = odp_port;
a36de779 3331 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
2d9b49dd 3332 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
a36de779
PS
3333 tnl_push_pop_send = true;
3334 } else {
2d9b49dd 3335 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
1520ef4f 3336 commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
a36de779
PS
3337 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3338 }
9583bc14 3339 } else {
46c88433 3340 odp_port = xport->odp_port;
7614e5d0 3341 out_port = odp_port;
9583bc14 3342 }
9583bc14 3343
4e022ec0 3344 if (out_port != ODPP_NONE) {
704bb0bf 3345 xlate_commit_actions(ctx);
adcf00ba 3346
e93ef1c7 3347 if (xr) {
347bf289 3348 struct ovs_action_hash *act_hash;
adcf00ba 3349
347bf289 3350 /* Hash action. */
1520ef4f 3351 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
347bf289
AZ
3352 OVS_ACTION_ATTR_HASH,
3353 sizeof *act_hash);
3354 act_hash->hash_alg = xr->hash_alg;
62ac1f20 3355 act_hash->hash_basis = xr->hash_basis;
347bf289
AZ
3356
3357 /* Recirc action. */
1520ef4f 3358 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
347bf289 3359 xr->recirc_id);
adcf00ba 3360 } else {
a36de779
PS
3361
3362 if (tnl_push_pop_send) {
3363 build_tunnel_send(ctx, xport, flow, odp_port);
3364 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3365 } else {
3366 odp_port_t odp_tnl_port = ODPP_NONE;
3367
 3368 /* XXX: Write a better filter for the tunnel port. We could use
 3369 * in_port in the tunnel-port flow to avoid these checks completely. */
3370 if (ofp_port == OFPP_LOCAL &&
3371 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3372
3373 odp_tnl_port = tnl_port_map_lookup(flow, wc);
3374 }
3375
3376 if (odp_tnl_port != ODPP_NONE) {
1520ef4f 3377 nl_msg_put_odp_port(ctx->odp_actions,
a36de779
PS
3378 OVS_ACTION_ATTR_TUNNEL_POP,
3379 odp_tnl_port);
3380 } else {
3381 /* Tunnel push-pop action is not compatible with
3382 * IPFIX action. */
a6092018 3383 compose_ipfix_action(ctx, out_port);
1356dbd1
WT
3384
3385 /* Handle truncation of the mirrored packet. */
3386 if (ctx->mirror_snaplen > 0 &&
3387 ctx->mirror_snaplen < UINT16_MAX) {
3388 struct ovs_action_trunc *trunc;
3389
3390 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
3391 OVS_ACTION_ATTR_TRUNC,
3392 sizeof *trunc);
3393 trunc->max_len = ctx->mirror_snaplen;
3394 if (!ctx->xbridge->support.trunc) {
3395 ctx->xout->slow |= SLOW_ACTION;
3396 }
3397 }
3398
1520ef4f 3399 nl_msg_put_odp_port(ctx->odp_actions,
a36de779
PS
3400 OVS_ACTION_ATTR_OUTPUT,
3401 out_port);
1356dbd1
WT
3402 }
3403 }
adcf00ba 3404 }
9583bc14 3405
6cbbf4fa
EJ
3406 ctx->sflow_odp_port = odp_port;
3407 ctx->sflow_n_outputs++;
2031ef97 3408 ctx->nf_output_iface = ofp_port;
6cbbf4fa
EJ
3409 }
3410
7efbc3b7
BP
3411 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
3412 mirror_packet(ctx, xport->xbundle,
3413 xbundle_mirror_dst(xport->xbundle->xbridge,
3414 xport->xbundle));
3415 }
3416
6cbbf4fa 3417 out:
9583bc14 3418 /* Restore flow */
33bf9176 3419 flow->vlan_tci = flow_vlan_tci;
1362e248 3420 flow->pkt_mark = flow_pkt_mark;
33bf9176 3421 flow->nw_tos = flow_nw_tos;
9583bc14
EJ
3422}
3423
3424static void
e93ef1c7
JR
3425compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3426 const struct xlate_bond_recirc *xr)
9583bc14 3427{
e93ef1c7 3428 compose_output_action__(ctx, ofp_port, xr, true);
9583bc14
EJ
3429}
3430
bb61b33d 3431static void
790c5d26 3432xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule, bool deepens)
bb61b33d
BP
3433{
3434 struct rule_dpif *old_rule = ctx->rule;
8b1e5560 3435 ovs_be64 old_cookie = ctx->rule_cookie;
dc723c44 3436 const struct rule_actions *actions;
bb61b33d
BP
3437
3438 if (ctx->xin->resubmit_stats) {
70742c7f 3439 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
bb61b33d
BP
3440 }
3441
98b07853 3442 ctx->resubmits++;
790c5d26 3443
790c5d26 3444 ctx->depth += deepens;
bb61b33d 3445 ctx->rule = rule;
07a3cd5c
BP
3446 ctx->rule_cookie = rule->up.flow_cookie;
3447 actions = rule_get_actions(&rule->up);
6f00e29b 3448 do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
8b1e5560 3449 ctx->rule_cookie = old_cookie;
bb61b33d 3450 ctx->rule = old_rule;
790c5d26 3451 ctx->depth -= deepens;
bb61b33d
BP
3452}
3453
bd3240ba
SH
3454static bool
3455xlate_resubmit_resource_check(struct xlate_ctx *ctx)
9583bc14 3456{
790c5d26 3457 if (ctx->depth >= MAX_DEPTH) {
2d9b49dd 3458 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
fff1b9c0 3459 ctx->error = XLATE_RECURSION_TOO_DEEP;
790c5d26 3460 } else if (ctx->resubmits >= MAX_RESUBMITS) {
2d9b49dd 3461 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
fff1b9c0 3462 ctx->error = XLATE_TOO_MANY_RESUBMITS;
1520ef4f 3463 } else if (ctx->odp_actions->size > UINT16_MAX) {
2d9b49dd 3464 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
fff1b9c0
JR
3465 /* NOT an error, as we'll be slow-pathing the flow in this case? */
3466 ctx->exit = true; /* XXX: translation still terminated! */
6fd6ed71 3467 } else if (ctx->stack.size >= 65536) {
2d9b49dd 3468 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
fff1b9c0 3469 ctx->error = XLATE_STACK_TOO_DEEP;
98b07853 3470 } else {
bd3240ba
SH
3471 return true;
3472 }
3473
3474 return false;
3475}
3476
2cd20955
JR
3477static void
3478tuple_swap_flow(struct flow *flow, bool ipv4)
3479{
3480 uint8_t nw_proto = flow->nw_proto;
3481 flow->nw_proto = flow->ct_nw_proto;
3482 flow->ct_nw_proto = nw_proto;
3483
3484 if (ipv4) {
3485 ovs_be32 nw_src = flow->nw_src;
3486 flow->nw_src = flow->ct_nw_src;
3487 flow->ct_nw_src = nw_src;
3488
3489 ovs_be32 nw_dst = flow->nw_dst;
3490 flow->nw_dst = flow->ct_nw_dst;
3491 flow->ct_nw_dst = nw_dst;
3492 } else {
3493 struct in6_addr ipv6_src = flow->ipv6_src;
3494 flow->ipv6_src = flow->ct_ipv6_src;
3495 flow->ct_ipv6_src = ipv6_src;
3496
3497 struct in6_addr ipv6_dst = flow->ipv6_dst;
3498 flow->ipv6_dst = flow->ct_ipv6_dst;
3499 flow->ct_ipv6_dst = ipv6_dst;
3500 }
3501
3502 ovs_be16 tp_src = flow->tp_src;
3503 flow->tp_src = flow->ct_tp_src;
3504 flow->ct_tp_src = tp_src;
3505
3506 ovs_be16 tp_dst = flow->tp_dst;
3507 flow->tp_dst = flow->ct_tp_dst;
3508 flow->ct_tp_dst = tp_dst;
3509}
3510
3511static void
3512tuple_swap(struct flow *flow, struct flow_wildcards *wc)
3513{
3514 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
3515
3516 tuple_swap_flow(flow, ipv4);
3517 tuple_swap_flow(&wc->masks, ipv4);
3518}
3519
bd3240ba 3520static void
6d328fa2 3521xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
2cd20955
JR
3522 bool may_packet_in, bool honor_table_miss,
3523 bool with_ct_orig)
bd3240ba 3524{
e12ec36b
SH
3525 /* Check if we need to recirculate before matching in a table. */
3526 if (ctx->was_mpls) {
3527 ctx_trigger_freeze(ctx);
3528 return;
3529 }
bd3240ba 3530 if (xlate_resubmit_resource_check(ctx)) {
9583bc14 3531 uint8_t old_table_id = ctx->table_id;
3f207910 3532 struct rule_dpif *rule;
9583bc14
EJ
3533
3534 ctx->table_id = table_id;
3535
2cd20955
JR
3536 /* Swap packet fields with CT 5-tuple if requested. */
3537 if (with_ct_orig) {
3538 /* Do not swap if there is no CT tuple, or if key is not IP. */
3539 if (ctx->xin->flow.ct_nw_proto == 0 ||
3540 !is_ip_any(&ctx->xin->flow)) {
3541 xlate_report_error(ctx,
3542 "resubmit(ct) with non-tracked or non-IP packet!");
3543 return;
3544 }
3545 tuple_swap(&ctx->xin->flow, ctx->wc);
3546 }
34dd0d78 3547 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
1f4a8933 3548 ctx->xin->tables_version,
c0e638aa 3549 &ctx->xin->flow, ctx->wc,
34dd0d78
JR
3550 ctx->xin->resubmit_stats,
3551 &ctx->table_id, in_port,
a027899e
JR
3552 may_packet_in, honor_table_miss,
3553 ctx->xin->xcache);
2cd20955
JR
3554 /* Swap back. */
3555 if (with_ct_orig) {
3556 tuple_swap(&ctx->xin->flow, ctx->wc);
3557 }
ad3efdcb 3558
a2143702 3559 if (rule) {
83709dfa
JR
3560 /* Fill in the cache entry here instead of xlate_recursively
3561 * to make the reference counting more explicit. We take a
3562 * reference in the lookups above if we are going to cache the
3563 * rule. */
3564 if (ctx->xin->xcache) {
3565 struct xc_entry *entry;
3566
3567 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
901a517e 3568 entry->rule = rule;
07a3cd5c 3569 ofproto_rule_ref(&rule->up);
83709dfa 3570 }
2d9b49dd
BP
3571
3572 struct ovs_list *old_trace = ctx->xin->trace;
3573 xlate_report_table(ctx, rule, table_id);
790c5d26 3574 xlate_recursively(ctx, rule, table_id <= old_table_id);
2d9b49dd 3575 ctx->xin->trace = old_trace;
ad3efdcb
EJ
3576 }
3577
9583bc14 3578 ctx->table_id = old_table_id;
98b07853 3579 return;
9583bc14
EJ
3580 }
3581}
3582
76973237 3583/* Consumes the group reference, which is only taken if xcache exists. */
f4fb341b 3584static void
1e684d7d
RW
3585xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
3586 struct ofputil_bucket *bucket)
3587{
3588 if (ctx->xin->resubmit_stats) {
3589 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
3590 }
3591 if (ctx->xin->xcache) {
3592 struct xc_entry *entry;
3593
3594 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
901a517e
JR
3595 entry->group.group = group;
3596 entry->group.bucket = bucket;
1e684d7d
RW
3597 }
3598}
3599
3600static void
3601xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
f4fb341b
SH
3602{
3603 uint64_t action_list_stub[1024 / 8];
0a2869d5
BP
3604 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
3605 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
3606 bucket->ofpacts_len);
5b09e569 3607 struct flow old_flow = ctx->xin->flow;
e12ec36b 3608 bool old_was_mpls = ctx->was_mpls;
f4fb341b 3609
f4fb341b 3610 ofpacts_execute_action_set(&action_list, &action_set);
790c5d26 3611 ctx->depth++;
6fd6ed71 3612 do_xlate_actions(action_list.data, action_list.size, ctx);
790c5d26 3613 ctx->depth--;
f4fb341b 3614
f4fb341b 3615 ofpbuf_uninit(&action_list);
5b09e569 3616
77ab5fd2 3617 /* Check if need to freeze. */
1d361a81 3618 if (ctx->freezing) {
77ab5fd2 3619 finish_freezing(ctx);
e672ff9b
JR
3620 }
3621
5b09e569
JR
3622 /* Roll back flow to previous state.
3623 * This is equivalent to cloning the packet for each bucket.
3624 *
3625 * As a side effect any subsequently applied actions will
3626 * also effectively be applied to a clone of the packet taken
3627 * just before applying the all or indirect group.
3628 *
3629 * Note that group buckets are action sets, hence they cannot modify the
3630 * main action set. Also any stack actions are ignored when executing an
3631 * action set, so group buckets cannot change the stack either.
3632 * However, we do allow resubmit actions in group buckets, which could
 3633 * break the above assumptions. It is up to the controller not to mess
 3634 * with the action_set and stack in the tables resubmitted to from
3635 * group buckets. */
3636 ctx->xin->flow = old_flow;
3637
e12ec36b
SH
3638 /* The group bucket popping MPLS should have no effect after bucket
3639 * execution. */
3640 ctx->was_mpls = old_was_mpls;
3641
5b09e569
JR
3642 /* The fact that the group bucket exits (for any reason) does not mean that
3643 * the translation after the group action should exit. Specifically, if
1d361a81
BP
3644 * the group bucket freezes translation, the actions after the group action
3645 * must continue processing with the original, not the frozen packet! */
5b09e569 3646 ctx->exit = false;
f4fb341b
SH
3647}
3648
3649static void
3650xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
3651{
1e684d7d 3652 struct ofputil_bucket *bucket;
07a3cd5c 3653 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
f4fb341b 3654 xlate_group_bucket(ctx, bucket);
f4fb341b 3655 }
1e684d7d 3656 xlate_group_stats(ctx, group, NULL);
f4fb341b
SH
3657}
3658
dd8cd4b4
SH
3659static void
3660xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
3661{
1e684d7d 3662 struct ofputil_bucket *bucket;
dd8cd4b4
SH
3663
3664 bucket = group_first_live_bucket(ctx, group, 0);
3665 if (bucket) {
3666 xlate_group_bucket(ctx, bucket);
1e684d7d 3667 xlate_group_stats(ctx, group, bucket);
76973237 3668 } else if (ctx->xin->xcache) {
07a3cd5c 3669 ofproto_group_unref(&group->up);
dd8cd4b4
SH
3670 }
3671}
3672
fe7e5749 3673static void
7565c3e4 3674xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
fe7e5749 3675{
49a73e0c 3676 struct flow_wildcards *wc = ctx->wc;
1e684d7d 3677 struct ofputil_bucket *bucket;
fe7e5749
SH
3678 uint32_t basis;
3679
1d1aae0b 3680 basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
80e3509d 3681 flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
fe7e5749
SH
3682 bucket = group_best_live_bucket(ctx, group, basis);
3683 if (bucket) {
fe7e5749 3684 xlate_group_bucket(ctx, bucket);
1e684d7d 3685 xlate_group_stats(ctx, group, bucket);
76973237 3686 } else if (ctx->xin->xcache) {
07a3cd5c 3687 ofproto_group_unref(&group->up);
fe7e5749
SH
3688 }
3689}
3690
0c4b9393
SH
3691static void
3692xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
3693{
07a3cd5c
BP
3694 const struct field_array *fields = &group->up.props.fields;
3695 const uint8_t *mask_values = fields->values;
3696 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
0c4b9393 3697
07a3cd5c 3698 size_t i;
e8dba719
JR
3699 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
3700 const struct mf_field *mf = mf_from_id(i);
0c4b9393 3701
e8dba719
JR
 3702 /* Skip fields for which prerequisites are not met. */
3703 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
3704 /* Skip the mask bytes for this field. */
3705 mask_values += mf->n_bytes;
3706 continue;
3707 }
0c4b9393 3708
e8dba719
JR
3709 union mf_value value;
3710 union mf_value mask;
0c4b9393 3711
e8dba719
JR
3712 mf_get_value(mf, &ctx->xin->flow, &value);
3713 /* Mask the value. */
3714 for (int j = 0; j < mf->n_bytes; j++) {
3715 mask.b[j] = *mask_values++;
3716 value.b[j] &= mask.b[j];
3717 }
3718 basis = hash_bytes(&value, mf->n_bytes, basis);
1cb20095 3719
e8dba719
JR
3720 /* For tunnels, hash in whether the field is present. */
3721 if (mf_is_tun_metadata(mf)) {
3722 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
0c4b9393 3723 }
e8dba719
JR
3724
3725 mf_mask_field_masked(mf, &mask, ctx->wc);
0c4b9393
SH
3726 }
3727
07a3cd5c 3728 struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
0c4b9393
SH
3729 if (bucket) {
3730 xlate_group_bucket(ctx, bucket);
3731 xlate_group_stats(ctx, group, bucket);
76973237 3732 } else if (ctx->xin->xcache) {
07a3cd5c 3733 ofproto_group_unref(&group->up);
0c4b9393
SH
3734 }
3735}
3736
53cc166a
JR
3737static void
3738xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
3739{
3740 struct ofputil_bucket *bucket;
3741
3742 /* dp_hash value 0 is special since it means that the dp_hash has not been
3743 * computed, as all computed dp_hash values are non-zero. Therefore
3744 * compare to zero can be used to decide if the dp_hash value is valid
3745 * without masking the dp_hash field. */
3746 if (!ctx->xin->flow.dp_hash) {
07a3cd5c 3747 uint64_t param = group->up.props.selection_method_param;
53cc166a
JR
3748
3749 ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
3750 } else {
07a3cd5c 3751 uint32_t n_buckets = group->up.n_buckets;
53cc166a
JR
3752 if (n_buckets) {
3753 /* Minimal mask to cover the number of buckets. */
3754 uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
 3755 /* Multiplier chosen to make the trivial 1-bit case
 3756 * actually distribute amongst two equal-weight buckets. */
3757 uint32_t basis = 0xc2b73583 * (ctx->xin->flow.dp_hash & mask);
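 /* Worked example (illustrative, not from the original source): with
 * n_buckets == 2, log_2_ceil(2) == 1, so mask == 0x1. The masked dp_hash
 * is either 0 or 1, giving two distinct basis values, so the two
 * equal-weight buckets each receive roughly half of the traffic. */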
3758
3759 ctx->wc->masks.dp_hash |= mask;
3760 bucket = group_best_live_bucket(ctx, group, basis);
3761 if (bucket) {
3762 xlate_group_bucket(ctx, bucket);
3763 xlate_group_stats(ctx, group, bucket);
3764 }
3765 }
3766 }
3767}
3768
7565c3e4
SH
3769static void
3770xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
3771{
07a3cd5c 3772 const char *selection_method = group->up.props.selection_method;
7565c3e4 3773
e12ec36b
SH
3774 /* Select groups may access flow keys beyond L2 in order to
3775 * select a bucket. Recirculate as appropriate to make this possible.
3776 */
3777 if (ctx->was_mpls) {
3778 ctx_trigger_freeze(ctx);
3779 }
3780
7565c3e4
SH
3781 if (selection_method[0] == '\0') {
3782 xlate_default_select_group(ctx, group);
0c4b9393
SH
3783 } else if (!strcasecmp("hash", selection_method)) {
3784 xlate_hash_fields_select_group(ctx, group);
53cc166a
JR
3785 } else if (!strcasecmp("dp_hash", selection_method)) {
3786 xlate_dp_hash_select_group(ctx, group);
7565c3e4
SH
3787 } else {
3788 /* Parsing of groups should ensure this never happens */
3789 OVS_NOT_REACHED();
3790 }
3791}
3792
f4fb341b
SH
3793static void
3794xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
3795{
0eb48fe1 3796 bool was_in_group = ctx->in_group;
5a070238
BP
3797 ctx->in_group = true;
3798
07a3cd5c 3799 switch (group->up.type) {
f4fb341b
SH
3800 case OFPGT11_ALL:
3801 case OFPGT11_INDIRECT:
3802 xlate_all_group(ctx, group);
3803 break;
3804 case OFPGT11_SELECT:
fe7e5749 3805 xlate_select_group(ctx, group);
f4fb341b 3806 break;
dd8cd4b4
SH
3807 case OFPGT11_FF:
3808 xlate_ff_group(ctx, group);
3809 break;
f4fb341b 3810 default:
428b2edd 3811 OVS_NOT_REACHED();
f4fb341b 3812 }
5a070238 3813
0eb48fe1 3814 ctx->in_group = was_in_group;
f4fb341b
SH
3815}
3816
3817static bool
3818xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
3819{
0eb48fe1 3820 if (xlate_resubmit_resource_check(ctx)) {
f4fb341b 3821 struct group_dpif *group;
f4fb341b 3822
76973237
JR
3823 /* Take ref only if xcache exists. */
3824 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 3825 ctx->xin->tables_version, ctx->xin->xcache);
db88b35c
JR
3826 if (!group) {
3827 /* XXX: Should set ctx->error ? */
2d9b49dd
BP
3828 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
3829 group_id);
f4fb341b
SH
3830 return true;
3831 }
db88b35c 3832 xlate_group_action__(ctx, group);
f4fb341b
SH
3833 }
3834
3835 return false;
3836}
3837
9583bc14
EJ
3838static void
3839xlate_ofpact_resubmit(struct xlate_ctx *ctx,
3840 const struct ofpact_resubmit *resubmit)
3841{
4e022ec0 3842 ofp_port_t in_port;
9583bc14 3843 uint8_t table_id;
adcf00ba
AZ
3844 bool may_packet_in = false;
3845 bool honor_table_miss = false;
3846
3847 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
3848 /* Still allow missed packets to be sent to the controller
3849 * if resubmitting from an internal table. */
3850 may_packet_in = true;
3851 honor_table_miss = true;
3852 }
9583bc14
EJ
3853
3854 in_port = resubmit->in_port;
3855 if (in_port == OFPP_IN_PORT) {
4e022ec0 3856 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
3857 }
3858
3859 table_id = resubmit->table_id;
3860 if (table_id == 255) {
3861 table_id = ctx->table_id;
3862 }
3863
adcf00ba 3864 xlate_table_action(ctx, in_port, table_id, may_packet_in,
2cd20955 3865 honor_table_miss, resubmit->with_ct_orig);
9583bc14
EJ
3866}
3867
3868static void
3869flood_packets(struct xlate_ctx *ctx, bool all)
3870{
46c88433 3871 const struct xport *xport;
9583bc14 3872
46c88433
EJ
3873 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
3874 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
3875 continue;
3876 }
3877
3878 if (all) {
e93ef1c7 3879 compose_output_action__(ctx, xport->ofp_port, NULL, false);
46c88433 3880 } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
e93ef1c7 3881 compose_output_action(ctx, xport->ofp_port, NULL);
9583bc14
EJ
3882 }
3883 }
3884
2031ef97 3885 ctx->nf_output_iface = NF_OUT_FLOOD;
9583bc14
EJ
3886}
3887
27d931da
AZ
3888/* Copy and reformat a partially xlated odp actions list into a new
3889 * odp actions list in 'b', so that the new actions list
3890 * can be executed by odp_execute_actions.
3891 *
3892 * When xlating nested odp actions, such as sample and clone,
3893 * the nested action created by nl_msg_start_nested() may not
3894 * have been properly closed yet, and thus cannot be executed
3895 * directly.
3896 *
3897 * Since an unclosed nested action must be the last action, it can be
3898 * fixed by skipping the outer header and treating the actions within
3899 * as if they were outside the nested attribute, since the effect
3900 * of executing them on the packet is the same.
3901 *
3902 * As an optimization, a fully closed 'sample' or 'clone' action
3903 * is skipped, since its execution has no effect on the packet.
3904 *
3905 * Returns true on success. 'b' contains the new actions list.
3906 * The caller is responsible for disposing of 'b'.
3907 *
3908 * Returns false on error; 'b' has been freed already. */
3909static bool
3910xlate_fixup_actions(struct ofpbuf *b, const struct nlattr *actions,
3911 size_t actions_len)
3912{
3913 const struct nlattr *a;
3914 unsigned int left;
3915
3916 NL_ATTR_FOR_EACH_UNSAFE (a, left, actions, actions_len) {
3917 int type = nl_attr_type(a);
3918
3919 switch ((enum ovs_action_attr) type) {
3920 case OVS_ACTION_ATTR_HASH:
3921 case OVS_ACTION_ATTR_PUSH_VLAN:
3922 case OVS_ACTION_ATTR_POP_VLAN:
3923 case OVS_ACTION_ATTR_PUSH_MPLS:
3924 case OVS_ACTION_ATTR_POP_MPLS:
3925 case OVS_ACTION_ATTR_SET:
3926 case OVS_ACTION_ATTR_SET_MASKED:
3927 case OVS_ACTION_ATTR_TRUNC:
3928 case OVS_ACTION_ATTR_OUTPUT:
3929 case OVS_ACTION_ATTR_TUNNEL_PUSH:
3930 case OVS_ACTION_ATTR_TUNNEL_POP:
3931 case OVS_ACTION_ATTR_USERSPACE:
3932 case OVS_ACTION_ATTR_RECIRC:
3933 case OVS_ACTION_ATTR_CT:
0d11fc52
JR
3934 case OVS_ACTION_ATTR_PUSH_ETH:
3935 case OVS_ACTION_ATTR_POP_ETH:
5dddf960 3936 case OVS_ACTION_ATTR_METER:
27d931da
AZ
3937 ofpbuf_put(b, a, nl_attr_len_pad(a, left));
3938 break;
3939
3940 case OVS_ACTION_ATTR_CLONE:
3941 /* If the clone action has been fully xlated, it can
3942 * be skipped, since any actions executed within clone
3943 * do not affect the current packet.
3944 *
3945 * When xlating actions within clone, the clone action,
 3946 * because it is a nested netlink attribute, does not have
3947 * a valid 'nla_len'; it will be zero instead. Skip
3948 * the clone header to find the start of the actions
3949 * enclosed. Treat those actions as if they are written
3950 * outside of clone. */
3951 if (!a->nla_len) {
3952 bool ok;
3953 if (left < NLA_HDRLEN) {
3954 goto error;
3955 }
3956
3957 ok = xlate_fixup_actions(b, nl_attr_get_unspec(a, 0),
3958 left - NLA_HDRLEN);
3959 if (!ok) {
3960 goto error;
3961 }
3962 }
3963 break;
3964
3965 case OVS_ACTION_ATTR_SAMPLE:
3966 if (!a->nla_len) {
3967 bool ok;
3968 if (left < NLA_HDRLEN) {
3969 goto error;
3970 }
3971 const struct nlattr *attr = nl_attr_get_unspec(a, 0);
3972 left -= NLA_HDRLEN;
3973
3974 while (left > 0 &&
3975 nl_attr_type(attr) != OVS_SAMPLE_ATTR_ACTIONS) {
3976 /* Only OVS_SAMPLE_ATTR_ACTIONS can have unclosed
3977 * nested netlink attribute. */
3978 if (!attr->nla_len) {
3979 goto error;
3980 }
3981
3982 left -= NLA_ALIGN(attr->nla_len);
3983 attr = nl_attr_next(attr);
3984 }
3985
3986 if (left < NLA_HDRLEN) {
3987 goto error;
3988 }
3989
3990 ok = xlate_fixup_actions(b, nl_attr_get_unspec(attr, 0),
3991 left - NLA_HDRLEN);
3992 if (!ok) {
3993 goto error;
3994 }
3995 }
3996 break;
3997
3998 case OVS_ACTION_ATTR_UNSPEC:
3999 case __OVS_ACTION_ATTR_MAX:
4000 OVS_NOT_REACHED();
4001 }
4002 }
4003
4004 return true;
4005
4006error:
4007 ofpbuf_delete(b);
4008 return false;
4009}
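/* Illustrative sketch (hypothetical action list, not taken from the source):
 * while translating inside a clone, ctx->odp_actions may hold
 *     set(eth(...)), clone(trunc(200), output(1)
 * with the clone attribute still open (nla_len == 0).  xlate_fixup_actions()
 * copies it as
 *     set(eth(...)), trunc(200), output(1)
 * which odp_execute_actions() can run with the same effect on the packet. */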
4010
4011static bool
4012xlate_execute_odp_actions(struct dp_packet *packet,
4013 const struct nlattr *actions, int actions_len)
4014{
4015 struct dp_packet_batch batch;
4016 struct ofpbuf *b = ofpbuf_new(actions_len);
4017
4018 if (!xlate_fixup_actions(b, actions, actions_len)) {
4019 return false;
4020 }
4021
4022 dp_packet_batch_init_packet(&batch, packet);
4023 odp_execute_actions(NULL, &batch, false, b->data, b->size, NULL);
4024 ofpbuf_delete(b);
4025
4026 return true;
4027}
4028
9583bc14
EJ
4029static void
4030execute_controller_action(struct xlate_ctx *ctx, int len,
4031 enum ofp_packet_in_reason reason,
bdcad671
BP
4032 uint16_t controller_id,
4033 const uint8_t *userdata, size_t userdata_len)
9583bc14 4034{
e14deea0 4035 struct dp_packet *packet;
9583bc14 4036
04594cd5 4037 ctx->xout->slow |= SLOW_CONTROLLER;
b476e2f2 4038 xlate_commit_actions(ctx);
9583bc14
EJ
4039 if (!ctx->xin->packet) {
4040 return;
4041 }
4042
df70a773
JR
4043 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4044 return;
4045 }
4046
cf62fa4c 4047 packet = dp_packet_clone(ctx->xin->packet);
27d931da
AZ
4048 if (!xlate_execute_odp_actions(packet, ctx->odp_actions->data,
4049 ctx->odp_actions->size)) {
4050 xlate_report_error(ctx, "Failed to execute controller action");
4051 dp_packet_delete(packet);
4052 return;
4053 }
9bfe9334
BP
4054 /* A packet sent by an action in a table-miss rule is considered an
4055 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4056 * it will get translated back to OFPR_ACTION for those versions. */
4057 if (reason == OFPR_ACTION
07a3cd5c 4058 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
9bfe9334
BP
4059 reason = OFPR_EXPLICIT_MISS;
4060 }
4061
4062 size_t packet_len = dp_packet_size(packet);
0fb7792a 4063
a2b53dec
BP
4064 struct ofproto_async_msg *am = xmalloc(sizeof *am);
4065 *am = (struct ofproto_async_msg) {
9bfe9334 4066 .controller_id = controller_id,
a2b53dec
BP
4067 .oam = OAM_PACKET_IN,
4068 .pin = {
4069 .up = {
77ab5fd2
BP
4070 .public = {
4071 .packet = dp_packet_steal_data(packet),
4072 .packet_len = packet_len,
4073 .reason = reason,
4074 .table_id = ctx->table_id,
4075 .cookie = ctx->rule_cookie,
4076 .userdata = (userdata_len
4077 ? xmemdup(userdata, userdata_len)
4078 : NULL),
4079 .userdata_len = userdata_len,
4080 }
a2b53dec
BP
4081 },
4082 .max_len = len,
9bfe9334 4083 },
9bfe9334 4084 };
77ab5fd2 4085 flow_get_metadata(&ctx->xin->flow, &am->pin.up.public.flow_metadata);
9583bc14 4086
df70a773
JR
4087 /* Async messages are only sent once, so if we send one now, no
4088 * xlate cache entry is created. */
4089 if (ctx->xin->allow_side_effects) {
4090 ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
4091 } else /* xcache */ {
4092 struct xc_entry *entry;
4093
4094 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
4095 entry->controller.ofproto = ctx->xbridge->ofproto;
4096 entry->controller.am = am;
4097 }
3b4fff43
RM
4098
4099 dp_packet_delete(packet);
9583bc14
EJ
4100}
4101
7bbdd84f 4102static void
77ab5fd2 4103emit_continuation(struct xlate_ctx *ctx, const struct frozen_state *state)
7bbdd84f 4104{
df70a773
JR
4105 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4106 return;
4107 }
4108
77ab5fd2
BP
4109 struct ofproto_async_msg *am = xmalloc(sizeof *am);
4110 *am = (struct ofproto_async_msg) {
4111 .controller_id = ctx->pause->controller_id,
4112 .oam = OAM_PACKET_IN,
4113 .pin = {
4114 .up = {
4115 .public = {
4116 .userdata = xmemdup(ctx->pause->userdata,
4117 ctx->pause->userdata_len),
4118 .userdata_len = ctx->pause->userdata_len,
4119 .packet = xmemdup(dp_packet_data(ctx->xin->packet),
4120 dp_packet_size(ctx->xin->packet)),
4121 .packet_len = dp_packet_size(ctx->xin->packet),
0b024e49 4122 .reason = ctx->pause->reason,
77ab5fd2 4123 },
07a3cd5c 4124 .bridge = ctx->xbridge->ofproto->uuid,
84cf3c1f
JR
4125 .stack = xmemdup(state->stack, state->stack_size),
4126 .stack_size = state->stack_size,
77ab5fd2
BP
4127 .mirrors = state->mirrors,
4128 .conntracked = state->conntracked,
4129 .actions = xmemdup(state->ofpacts, state->ofpacts_len),
4130 .actions_len = state->ofpacts_len,
4131 .action_set = xmemdup(state->action_set,
4132 state->action_set_len),
4133 .action_set_len = state->action_set_len,
4134 },
4135 .max_len = UINT16_MAX,
4136 },
4137 };
4138 flow_get_metadata(&ctx->xin->flow, &am->pin.up.public.flow_metadata);
df70a773
JR
4139
4140 /* Async messages are only sent once, so if we send one now, no
4141 * xlate cache entry is created. */
4142 if (ctx->xin->allow_side_effects) {
4143 ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
4144 } else /* xcache */ {
4145 struct xc_entry *entry;
4146
4147 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
4148 entry->controller.ofproto = ctx->xbridge->ofproto;
4149 entry->controller.am = am;
4150 }
77ab5fd2 4151}
7bbdd84f 4152
77ab5fd2
BP
4153static void
4154finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4155{
1d361a81 4156 ovs_assert(ctx->freezing);
7bbdd84f 4157
1d361a81 4158 struct frozen_state state = {
07659514 4159 .table_id = table,
07a3cd5c 4160 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5c1b2314 4161 .stack = ctx->stack.data,
84cf3c1f 4162 .stack_size = ctx->stack.size,
29bae541 4163 .mirrors = ctx->mirrors,
07659514 4164 .conntracked = ctx->conntracked,
1d361a81
BP
4165 .ofpacts = ctx->frozen_actions.data,
4166 .ofpacts_len = ctx->frozen_actions.size,
417509fa 4167 .action_set = ctx->action_set.data,
8a5fb3b4 4168 .action_set_len = ctx->action_set.size,
2082425c 4169 };
77ab5fd2 4170 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
2082425c 4171
77ab5fd2
BP
4172 if (ctx->pause) {
4173 if (ctx->xin->packet) {
4174 emit_continuation(ctx, &state);
4175 }
4176 } else {
4177 /* Allocate a unique recirc id for the given metadata state in the
4178 * flow. An existing id, with a new reference to the corresponding
4179 * recirculation context, will be returned if possible.
4180 * The life-cycle of this recirc id is managed by associating it
4181 * with the udpif key ('ukey') created for each new datapath flow. */
4182 uint32_t id = recirc_alloc_id_ctx(&state);
4183 if (!id) {
2d9b49dd 4184 xlate_report_error(ctx, "Failed to allocate recirculation id");
77ab5fd2
BP
4185 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4186 return;
4187 }
4188 recirc_refs_add(&ctx->xout->recircs, id);
7bbdd84f 4189
53cc166a
JR
4190 if (ctx->recirc_update_dp_hash) {
4191 struct ovs_action_hash *act_hash;
4192
4193 /* Hash action. */
4194 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4195 OVS_ACTION_ATTR_HASH,
4196 sizeof *act_hash);
4197 act_hash->hash_alg = OVS_HASH_ALG_L4; /* Make configurable. */
4198 act_hash->hash_basis = 0; /* Make configurable. */
4199 }
77ab5fd2
BP
4200 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
4201 }
e672ff9b 4202
1d361a81
BP
4203 /* Undo changes done by freezing. */
4204 ctx_cancel_freeze(ctx);
7bbdd84f
SH
4205}
4206
1d361a81 4207/* Called only when we're freezing. */
07659514 4208static void
77ab5fd2 4209finish_freezing(struct xlate_ctx *ctx)
07659514
JS
4210{
4211 xlate_commit_actions(ctx);
77ab5fd2 4212 finish_freezing__(ctx, 0);
07659514
JS
4213}
4214
e37b8437
JS
4215/* Fork the pipeline here. The current packet will continue processing the
4216 * current action list. A clone of the current packet will recirculate, skip
4217 * the remainder of the current action list and asynchronously resume pipeline
4218 * processing in 'table' with the current metadata and action set. */
4219static void
4220compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table)
4221{
1d361a81 4222 ctx->freezing = true;
77ab5fd2 4223 finish_freezing__(ctx, table);
e37b8437
JS
4224}
4225
8bfd0fda
BP
4226static void
4227compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
9583bc14 4228{
33bf9176 4229 struct flow *flow = &ctx->xin->flow;
8bfd0fda 4230 int n;
33bf9176 4231
8bfd0fda 4232 ovs_assert(eth_type_mpls(mpls->ethertype));
b0a17866 4233
49a73e0c 4234 n = flow_count_mpls_labels(flow, ctx->wc);
8bfd0fda 4235 if (!n) {
704bb0bf 4236 xlate_commit_actions(ctx);
8bfd0fda
BP
4237 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4238 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4239 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4240 "action can't be performed as it would have "
4241 "more MPLS LSEs than the %d supported.",
4242 FLOW_MAX_MPLS_LABELS);
9583bc14 4243 }
fff1b9c0 4244 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
8bfd0fda 4245 return;
9583bc14 4246 }
b0a17866 4247
742c0ac3
JR
4248 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4249 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
9583bc14
EJ
4250}
4251
8bfd0fda 4252static void
9cfef3d0 4253compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 4254{
8bfd0fda 4255 struct flow *flow = &ctx->xin->flow;
49a73e0c 4256 int n = flow_count_mpls_labels(flow, ctx->wc);
33bf9176 4257
49a73e0c 4258 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
8bf009bf 4259 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
e12ec36b 4260 ctx->was_mpls = true;
7bbdd84f
SH
4261 }
4262 } else if (n >= FLOW_MAX_MPLS_LABELS) {
8bfd0fda 4263 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4264 xlate_report_error(ctx, "dropping packet on which an "
4265 "MPLS pop action can't be performed as it has "
4266 "more MPLS LSEs than the %d supported.",
4267 FLOW_MAX_MPLS_LABELS);
8bfd0fda 4268 }
fff1b9c0 4269 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
1520ef4f 4270 ofpbuf_clear(ctx->odp_actions);
9583bc14
EJ
4271 }
4272}
4273
4274static bool
4275compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4276{
33bf9176
BP
4277 struct flow *flow = &ctx->xin->flow;
4278
4279 if (!is_ip_any(flow)) {
9583bc14
EJ
4280 return false;
4281 }
4282
49a73e0c 4283 ctx->wc->masks.nw_ttl = 0xff;
33bf9176
BP
4284 if (flow->nw_ttl > 1) {
4285 flow->nw_ttl--;
9583bc14
EJ
4286 return false;
4287 } else {
4288 size_t i;
4289
4290 for (i = 0; i < ids->n_controllers; i++) {
4291 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
bdcad671 4292 ids->cnt_ids[i], NULL, 0);
9583bc14
EJ
4293 }
4294
4295 /* Stop processing for current table. */
2d9b49dd
BP
4296 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4297 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
9583bc14
EJ
4298 return true;
4299 }
4300}
4301
8bfd0fda 4302static void
097d4939
JR
4303compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
4304{
8bfd0fda 4305 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4306 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
8bfd0fda 4307 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
097d4939 4308 }
097d4939
JR
4309}
4310
8bfd0fda 4311static void
097d4939
JR
4312compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
4313{
8bfd0fda 4314 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4315 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
8bfd0fda 4316 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
097d4939 4317 }
097d4939
JR
4318}
4319
8bfd0fda 4320static void
9cfef3d0 4321compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
9583bc14 4322{
8bfd0fda 4323 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4324 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda 4325 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
b0a17866 4326 }
9583bc14
EJ
4327}
4328
4329static bool
9cfef3d0 4330compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
9583bc14 4331{
33bf9176 4332 struct flow *flow = &ctx->xin->flow;
1dd35f8a 4333
8bfd0fda 4334 if (eth_type_mpls(flow->dl_type)) {
22d38fca
JR
4335 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
4336
49a73e0c 4337 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda
BP
4338 if (ttl > 1) {
4339 ttl--;
4340 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
4341 return false;
4342 } else {
bdcad671
BP
4343 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
4344 NULL, 0);
8bfd0fda 4345 }
9583bc14 4346 }
22d38fca
JR
4347
4348 /* Stop processing for current table. */
2d9b49dd 4349 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
22d38fca 4350 return true;
9583bc14
EJ
4351}
4352
4353static void
4354xlate_output_action(struct xlate_ctx *ctx,
4e022ec0 4355 ofp_port_t port, uint16_t max_len, bool may_packet_in)
9583bc14 4356{
2031ef97 4357 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
9583bc14 4358
2031ef97 4359 ctx->nf_output_iface = NF_OUT_DROP;
9583bc14
EJ
4360
4361 switch (port) {
4362 case OFPP_IN_PORT:
e93ef1c7 4363 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL);
9583bc14
EJ
4364 break;
4365 case OFPP_TABLE:
4e022ec0 4366 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
2cd20955 4367 0, may_packet_in, true, false);
9583bc14
EJ
4368 break;
4369 case OFPP_NORMAL:
4370 xlate_normal(ctx);
4371 break;
4372 case OFPP_FLOOD:
4373 flood_packets(ctx, false);
4374 break;
4375 case OFPP_ALL:
4376 flood_packets(ctx, true);
4377 break;
4378 case OFPP_CONTROLLER:
3a11fd5b 4379 execute_controller_action(ctx, max_len,
029ca940
SS
4380 (ctx->in_group ? OFPR_GROUP
4381 : ctx->in_action_set ? OFPR_ACTION_SET
4382 : OFPR_ACTION),
bdcad671 4383 0, NULL, 0);
9583bc14
EJ
4384 break;
4385 case OFPP_NONE:
4386 break;
4387 case OFPP_LOCAL:
4388 default:
4e022ec0 4389 if (port != ctx->xin->flow.in_port.ofp_port) {
e93ef1c7 4390 compose_output_action(ctx, port, NULL);
9583bc14 4391 } else {
2d9b49dd 4392 xlate_report(ctx, OFT_WARN, "skipping output to input port");
9583bc14
EJ
4393 }
4394 break;
4395 }
4396
4397 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2031ef97
BP
4398 ctx->nf_output_iface = NF_OUT_FLOOD;
4399 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4400 ctx->nf_output_iface = prev_nf_output_iface;
9583bc14 4401 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2031ef97
BP
4402 ctx->nf_output_iface != NF_OUT_FLOOD) {
4403 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
4404 }
4405}
4406
4407static void
4408xlate_output_reg_action(struct xlate_ctx *ctx,
4409 const struct ofpact_output_reg *or)
4410{
4411 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
4412 if (port <= UINT16_MAX) {
2d9b49dd
BP
4413 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
4414
9583bc14
EJ
4415 union mf_subvalue value;
4416
4417 memset(&value, 0xff, sizeof value);
49a73e0c 4418 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
2d9b49dd
BP
4419 xlate_output_action(ctx, u16_to_ofp(port), or->max_len, false);
4420 } else {
4421 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
4422 port);
9583bc14
EJ
4423 }
4424}
4425
aaca4fe0
WT
4426static void
4427xlate_output_trunc_action(struct xlate_ctx *ctx,
4428 ofp_port_t port, uint32_t max_len)
4429{
4430 bool support_trunc = ctx->xbridge->support.trunc;
4431 struct ovs_action_trunc *trunc;
4432 char name[OFP_MAX_PORT_NAME_LEN];
4433
4434 switch (port) {
4435 case OFPP_TABLE:
4436 case OFPP_NORMAL:
4437 case OFPP_FLOOD:
4438 case OFPP_ALL:
4439 case OFPP_CONTROLLER:
4440 case OFPP_NONE:
4441 ofputil_port_to_string(port, name, sizeof name);
2d9b49dd
BP
4442 xlate_report(ctx, OFT_WARN,
4443 "output_trunc does not support port: %s", name);
aaca4fe0
WT
4444 break;
4445 case OFPP_LOCAL:
4446 case OFPP_IN_PORT:
4447 default:
4448 if (port != ctx->xin->flow.in_port.ofp_port) {
4449 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
4450
4451 if (xport == NULL || xport->odp_port == ODPP_NONE) {
 4452 /* Since truncation happens at the following output action, if
 4453 * the output port is a patch port, the behavior is somewhat
 4454 * unpredictable. For simplicity, disallow this case. */
4455 ofputil_port_to_string(port, name, sizeof name);
2d9b49dd
BP
4456 xlate_report_error(ctx, "output_trunc does not support "
4457 "patch port %s", name);
aaca4fe0
WT
4458 break;
4459 }
4460
4461 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4462 OVS_ACTION_ATTR_TRUNC,
4463 sizeof *trunc);
4464 trunc->max_len = max_len;
4465 xlate_output_action(ctx, port, max_len, false);
4466 if (!support_trunc) {
4467 ctx->xout->slow |= SLOW_ACTION;
4468 }
4469 } else {
2d9b49dd 4470 xlate_report(ctx, OFT_WARN, "skipping output to input port");
aaca4fe0
WT
4471 }
4472 break;
4473 }
4474}
4475
9583bc14
EJ
4476static void
4477xlate_enqueue_action(struct xlate_ctx *ctx,
4478 const struct ofpact_enqueue *enqueue)
4479{
4e022ec0 4480 ofp_port_t ofp_port = enqueue->port;
9583bc14
EJ
4481 uint32_t queue_id = enqueue->queue;
4482 uint32_t flow_priority, priority;
4483 int error;
4484
4485 /* Translate queue to priority. */
89a8a7f0 4486 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
9583bc14
EJ
4487 if (error) {
4488 /* Fall back to ordinary output action. */
4489 xlate_output_action(ctx, enqueue->port, 0, false);
4490 return;
4491 }
4492
4493 /* Check output port. */
4494 if (ofp_port == OFPP_IN_PORT) {
4e022ec0
AW
4495 ofp_port = ctx->xin->flow.in_port.ofp_port;
4496 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
4497 return;
4498 }
4499
4500 /* Add datapath actions. */
4501 flow_priority = ctx->xin->flow.skb_priority;
4502 ctx->xin->flow.skb_priority = priority;
e93ef1c7 4503 compose_output_action(ctx, ofp_port, NULL);
9583bc14
EJ
4504 ctx->xin->flow.skb_priority = flow_priority;
4505
4506 /* Update NetFlow output port. */
2031ef97
BP
4507 if (ctx->nf_output_iface == NF_OUT_DROP) {
4508 ctx->nf_output_iface = ofp_port;
4509 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
4510 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
4511 }
4512}
4513
4514static void
4515xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
4516{
4517 uint32_t skb_priority;
4518
89a8a7f0 4519 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
9583bc14
EJ
4520 ctx->xin->flow.skb_priority = skb_priority;
4521 } else {
4522 /* Couldn't translate queue to a priority. Nothing to do. A warning
4523 * has already been logged. */
4524 }
4525}
4526
4527static bool
46c88433 4528slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
9583bc14 4529{
46c88433
EJ
4530 const struct xbridge *xbridge = xbridge_;
4531 struct xport *port;
9583bc14
EJ
4532
4533 switch (ofp_port) {
4534 case OFPP_IN_PORT:
4535 case OFPP_TABLE:
4536 case OFPP_NORMAL:
4537 case OFPP_FLOOD:
4538 case OFPP_ALL:
4539 case OFPP_NONE:
4540 return true;
4541 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
4542 return false;
4543 default:
46c88433 4544 port = get_ofp_port(xbridge, ofp_port);
9583bc14
EJ
4545 return port ? port->may_enable : false;
4546 }
4547}
4548
4549static void
4550xlate_bundle_action(struct xlate_ctx *ctx,
4551 const struct ofpact_bundle *bundle)
4552{
4e022ec0 4553 ofp_port_t port;
9583bc14 4554
49a73e0c 4555 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
46c88433 4556 CONST_CAST(struct xbridge *, ctx->xbridge));
9583bc14 4557 if (bundle->dst.field) {
49a73e0c 4558 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
2d9b49dd 4559 xlate_report_subfield(ctx, &bundle->dst);
9583bc14
EJ
4560 } else {
4561 xlate_output_action(ctx, port, 0, false);
4562 }
4563}
4564
4165b5e0
JS
4565static void
4566xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
4567{
49a73e0c 4568 learn_mask(learn, ctx->wc);
9583bc14 4569
df70a773 4570 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4165b5e0
JS
4571 uint64_t ofpacts_stub[1024 / 8];
4572 struct ofputil_flow_mod fm;
2c7ee524 4573 struct ofproto_flow_mod ofm__, *ofm;
4165b5e0 4574 struct ofpbuf ofpacts;
2c7ee524
JR
4575 enum ofperr error;
4576
4577 if (ctx->xin->xcache) {
4578 struct xc_entry *entry;
4579
4580 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
4581 entry->learn.ofm = xmalloc(sizeof *entry->learn.ofm);
4582 ofm = entry->learn.ofm;
4583 } else {
4584 ofm = &ofm__;
4585 }
4165b5e0
JS
4586
4587 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
2c7ee524 4588 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
2d9b49dd
BP
4589 if (OVS_UNLIKELY(ctx->xin->trace)) {
4590 struct ds s = DS_EMPTY_INITIALIZER;
4591 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
4592 match_format(&fm.match, &s, OFP_DEFAULT_PRIORITY);
4593 ds_chomp(&s, ' ');
4594 ds_put_format(&s, " priority=%d", fm.priority);
4595 if (fm.new_cookie) {
4596 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
4597 }
4598 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
4599 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
4600 }
4601 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
4602 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
4603 }
4604 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
4605 ds_put_cstr(&s, " send_flow_rem");
4606 }
4607 ds_put_cstr(&s, " actions=");
4608 ofpacts_format(fm.ofpacts, fm.ofpacts_len, &s);
4609 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
4610 ds_destroy(&s);
4611 }
2c7ee524
JR
4612 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
4613 &fm, ofm);
4165b5e0 4614 ofpbuf_uninit(&ofpacts);
2c7ee524 4615
df70a773 4616 if (!error && ctx->xin->allow_side_effects) {
2c7ee524
JR
4617 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL);
4618 }
4619
4620 if (error) {
2d9b49dd
BP
4621 xlate_report_error(ctx, "LEARN action execution failed (%s).",
4622 ofperr_to_string(error));
2c7ee524 4623 }
2d9b49dd
BP
4624 } else {
4625 xlate_report(ctx, OFT_WARN,
4626 "suppressing side effects, so learn action ignored");
b256dc52
JS
4627 }
4628}
4629
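/* Reduces the rule's timeouts once the connection is closing (FIN or RST
 * seen), so that flows for completed connections expire promptly. */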
4630static void
4631xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
4632 uint16_t idle_timeout, uint16_t hard_timeout)
4633{
4634 if (tcp_flags & (TCP_FIN | TCP_RST)) {
07a3cd5c 4635 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
b256dc52 4636 }
9583bc14
EJ
4637}
4638
9583bc14
EJ
4639static void
4640xlate_fin_timeout(struct xlate_ctx *ctx,
4641 const struct ofpact_fin_timeout *oft)
4642{
b256dc52 4643 if (ctx->rule) {
df70a773
JR
4644 if (ctx->xin->allow_side_effects) {
4645 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
4646 oft->fin_idle_timeout, oft->fin_hard_timeout);
4647 }
b256dc52
JS
4648 if (ctx->xin->xcache) {
4649 struct xc_entry *entry;
4650
4651 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
83709dfa
JR
4652 /* XC_RULE already holds a reference on the rule, none is taken
4653 * here. */
901a517e
JR
4654 entry->fin.rule = ctx->rule;
4655 entry->fin.idle = oft->fin_idle_timeout;
4656 entry->fin.hard = oft->fin_hard_timeout;
b256dc52 4657 }
9583bc14
EJ
4658 }
4659}
4660
4661static void
4662xlate_sample_action(struct xlate_ctx *ctx,
4663 const struct ofpact_sample *os)
4664{
f69f713b
BY
4665 odp_port_t output_odp_port = ODPP_NONE;
4666 odp_port_t tunnel_out_port = ODPP_NONE;
4667 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
4668 bool emit_set_tunnel = false;
4669
4670 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
4671 return;
4672 }
4673
e824d78d
JR
4674 /* Scale the probability from 16-bit to 32-bit while representing
4675 * the same percentage. */
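    /* For example, a 16-bit probability of 0x8000 (about 50%) becomes the
     * 32-bit probability 0x80008000. */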
4676 uint32_t probability = (os->probability << 16) | os->probability;
4677
b440dd8c 4678 if (!ctx->xbridge->support.variable_length_userdata) {
2d9b49dd
BP
4679 xlate_report_error(ctx, "ignoring NXAST_SAMPLE action because "
4680 "datapath lacks support (needs Linux 3.10+ or "
4681 "kernel module from OVS 1.11+)");
e824d78d
JR
4682 return;
4683 }
4684
f69f713b
BY
 4685 /* If the sampling_port in the flow sample action equals the input
 4686 * ofp_port, this sample action is an input port action. */
4687 if (os->sampling_port != OFPP_NONE &&
4688 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
4689 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
4690 os->sampling_port);
4691 if (output_odp_port == ODPP_NONE) {
2d9b49dd
BP
4692 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
4693 "action", os->sampling_port);
f69f713b
BY
4694 return;
4695 }
4696
4697 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
4698 os->collector_set_id)
4699 && dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
4700 tunnel_out_port = output_odp_port;
4701 emit_set_tunnel = true;
4702 }
4703 }
4704
4705 xlate_commit_actions(ctx);
 4706 /* If 'emit_set_tunnel' is true, sample(sampling_port=1) translates
 4707 * into the datapath actions set(tunnel(...)),sample(...), which are
 4708 * used for sampling egress tunnel information. */
4709 if (emit_set_tunnel) {
4710 const struct xport *xport = get_ofp_port(ctx->xbridge,
4711 os->sampling_port);
4712
4713 if (xport && xport->is_tunnel) {
4714 struct flow *flow = &ctx->xin->flow;
4715 tnl_port_send(xport->ofport, flow, ctx->wc);
4716 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
4717 struct flow_tnl flow_tnl = flow->tunnel;
4718
4719 commit_odp_tunnel_action(flow, &ctx->base_flow,
4720 ctx->odp_actions);
4721 flow->tunnel = flow_tnl;
4722 }
4723 } else {
2d9b49dd
BP
4724 xlate_report_error(ctx,
4725 "sampling_port:%d should be a tunnel port.",
4726 os->sampling_port);
f69f713b
BY
4727 }
4728 }
e824d78d 4729
a6092018
BP
4730 union user_action_cookie cookie = {
4731 .flow_sample = {
4732 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
4733 .probability = os->probability,
4734 .collector_set_id = os->collector_set_id,
4735 .obs_domain_id = os->obs_domain_id,
4736 .obs_point_id = os->obs_point_id,
f69f713b 4737 .output_odp_port = output_odp_port,
4930ea56 4738 .direction = os->direction,
a6092018
BP
4739 }
4740 };
4741 compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
f69f713b 4742 tunnel_out_port, false);
9583bc14
EJ
4743}
4744
456024cb 4745/* Use the datapath 'clone' action to enclose the translation of 'oc'. */
7ae62a67
WT
4746static void
4747compose_clone_action(struct xlate_ctx *ctx, const struct ofpact_nest *oc)
bef503e8
AZ
4748{
4749 size_t clone_offset = nl_msg_start_nested(ctx->odp_actions,
4750 OVS_ACTION_ATTR_CLONE);
456024cb
AZ
4751 do_xlate_actions(oc->actions, ofpact_nest_get_action_len(oc), ctx);
4752 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_offset);
4753}
4754
4755/* Use datapath 'sample' action to translate clone. */
4756static void
4757compose_clone_action_using_sample(struct xlate_ctx *ctx,
4758 const struct ofpact_nest *oc)
4759{
4760 size_t offset = nl_msg_start_nested(ctx->odp_actions,
4761 OVS_ACTION_ATTR_SAMPLE);
4762
4763 size_t ac_offset = nl_msg_start_nested(ctx->odp_actions,
4764 OVS_SAMPLE_ATTR_ACTIONS);
bef503e8
AZ
4765
4766 do_xlate_actions(oc->actions, ofpact_nest_get_action_len(oc), ctx);
4767
456024cb
AZ
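    /* If the nested action list came out empty, drop the enclosing sample
     * action entirely; otherwise emit it with 100% probability. */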
4768 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
4769 nl_msg_cancel_nested(ctx->odp_actions, offset);
4770 } else {
4771 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4772 UINT32_MAX); /* 100% probability. */
4773 nl_msg_end_nested(ctx->odp_actions, offset);
4774 }
bef503e8
AZ
4775}
4776
4777static void
4778xlate_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc)
7ae62a67 4779{
bd3c2df3 4780 bool old_was_mpls = ctx->was_mpls;
ba653d2a 4781 bool old_conntracked = ctx->conntracked;
7ae62a67 4782 struct flow old_flow = ctx->xin->flow;
ba653d2a 4783
b827b231
BP
4784 struct ofpbuf old_stack = ctx->stack;
4785 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
4786 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
4787 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
4788
4789 struct ofpbuf old_action_set = ctx->action_set;
4790 uint64_t actset_stub[1024 / 8];
4791 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
4792 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
4793
456024cb
AZ
 4794 /* The datapath clone action ensures that the packet as it existed
 4795 * before the clone is used for the actions that follow the clone.
 4796 * Save and restore ctx->base_flow so the OpenFlow pipeline sees the
 4797 * same behavior. */
4797 struct flow old_base_flow = ctx->base_flow;
bef503e8 4798 if (ctx->xbridge->support.clone) {
bef503e8 4799 compose_clone_action(ctx, oc);
bef503e8 4800 } else {
456024cb 4801 compose_clone_action_using_sample(ctx, oc);
bef503e8 4802 }
456024cb 4803 ctx->base_flow = old_base_flow;
ba653d2a 4804
b827b231
BP
4805 ofpbuf_uninit(&ctx->action_set);
4806 ctx->action_set = old_action_set;
4807
4808 ofpbuf_uninit(&ctx->stack);
4809 ctx->stack = old_stack;
4810
7ae62a67 4811 ctx->xin->flow = old_flow;
ba653d2a
BP
4812
4813 /* The clone's conntrack execution should have no effect on the original
4814 * packet. */
4815 ctx->conntracked = old_conntracked;
bd3c2df3
BP
4816
4817 /* Popping MPLS from the clone should have no effect on the original
4818 * packet. */
4819 ctx->was_mpls = old_was_mpls;
7ae62a67
WT
4820}
4821
076caa2f
JR
4822static void
4823xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
4824{
4825 if (meter->provider_meter_id != UINT32_MAX) {
4826 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
4827 meter->provider_meter_id);
4828 }
4829}
4830
9583bc14 4831static bool
46c88433 4832may_receive(const struct xport *xport, struct xlate_ctx *ctx)
9583bc14 4833{
bbbca389 4834 if (xport->config & (is_stp(&ctx->xin->flow)
46c88433
EJ
4835 ? OFPUTIL_PC_NO_RECV_STP
4836 : OFPUTIL_PC_NO_RECV)) {
9583bc14
EJ
4837 return false;
4838 }
4839
4840 /* Only drop packets here if both forwarding and learning are
4841 * disabled. If just learning is enabled, we need to have
4842 * OFPP_NORMAL and the learning action have a look at the packet
4843 * before we can drop it. */
9efd308e
DV
4844 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
4845 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
9583bc14
EJ
4846 return false;
4847 }
4848
4849 return true;
4850}
4851
7fdb60a7 4852static void
7e7e8dbb
BP
4853xlate_write_actions__(struct xlate_ctx *ctx,
4854 const struct ofpact *ofpacts, size_t ofpacts_len)
7fdb60a7 4855{
c61f3870
BP
4856 /* Maintain actset_output depending on the contents of the action set:
4857 *
4858 * - OFPP_UNSET, if there is no "output" action.
4859 *
4860 * - The output port, if there is an "output" action and no "group"
4861 * action.
4862 *
4863 * - OFPP_UNSET, if there is a "group" action.
4864 */
4865 if (!ctx->action_set_has_group) {
7e7e8dbb
BP
4866 const struct ofpact *a;
4867 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
4868 if (a->type == OFPACT_OUTPUT) {
4869 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
4870 } else if (a->type == OFPACT_GROUP) {
c61f3870
BP
4871 ctx->xin->flow.actset_output = OFPP_UNSET;
4872 ctx->action_set_has_group = true;
9055ca9a 4873 break;
c61f3870
BP
4874 }
4875 }
4876 }
4877
7e7e8dbb
BP
4878 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
4879}
4880
4881static void
4882xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
4883{
4884 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
7fdb60a7
SH
4885}
4886
4887static void
4888xlate_action_set(struct xlate_ctx *ctx)
4889{
2d9b49dd
BP
4890 uint64_t action_list_stub[1024 / 8];
4891 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
7fdb60a7 4892 ofpacts_execute_action_set(&action_list, &ctx->action_set);
ed9c9e3e
JR
4893 /* Clear the action set, as it is not needed any more. */
4894 ofpbuf_clear(&ctx->action_set);
2d9b49dd
BP
4895 if (action_list.size) {
4896 ctx->in_action_set = true;
4897
4898 struct ovs_list *old_trace = ctx->xin->trace;
4899 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
4900 "--. Executing action set:");
4901 do_xlate_actions(action_list.data, action_list.size, ctx);
4902 ctx->xin->trace = old_trace;
4903
4904 ctx->in_action_set = false;
4905 }
7fdb60a7
SH
4906 ofpbuf_uninit(&action_list);
4907}
4908
e672ff9b 4909static void
1d361a81 4910freeze_put_unroll_xlate(struct xlate_ctx *ctx)
e672ff9b 4911{
1d361a81 4912 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
e672ff9b
JR
4913
4914 /* Restore the table_id and rule cookie for a potential PACKET
4915 * IN if needed. */
4916 if (!unroll ||
4917 (ctx->table_id != unroll->rule_table_id
4918 || ctx->rule_cookie != unroll->rule_cookie)) {
1d361a81 4919 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
e672ff9b
JR
4920 unroll->rule_table_id = ctx->table_id;
4921 unroll->rule_cookie = ctx->rule_cookie;
1d361a81 4922 ctx->frozen_actions.header = unroll;
e672ff9b
JR
4923 }
4924}
4925
4926
1d361a81
BP
4927/* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
4928 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
4929 * present, before any action that may depend on the current table ID or flow
4930 * cookie. */
e672ff9b 4931static void
1d361a81 4932freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
e672ff9b
JR
4933 struct xlate_ctx *ctx)
4934{
c2b283b7 4935 for (; a < end; a = ofpact_next(a)) {
e672ff9b 4936 switch (a->type) {
e672ff9b 4937 case OFPACT_OUTPUT_REG:
aaca4fe0 4938 case OFPACT_OUTPUT_TRUNC:
e672ff9b
JR
4939 case OFPACT_GROUP:
4940 case OFPACT_OUTPUT:
4941 case OFPACT_CONTROLLER:
4942 case OFPACT_DEC_MPLS_TTL:
4943 case OFPACT_DEC_TTL:
83a31283
BP
4944 /* These actions may generate asynchronous messages, which include
4945 * table ID and flow cookie information. */
1d361a81 4946 freeze_put_unroll_xlate(ctx);
e672ff9b
JR
4947 break;
4948
83a31283
BP
4949 case OFPACT_RESUBMIT:
4950 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
4951 /* This resubmit action is relative to the current table, so we
 4952 * need to track what table that is. */
1d361a81 4953 freeze_put_unroll_xlate(ctx);
83a31283
BP
4954 }
4955 break;
4956
e672ff9b
JR
4957 case OFPACT_SET_TUNNEL:
4958 case OFPACT_REG_MOVE:
4959 case OFPACT_SET_FIELD:
4960 case OFPACT_STACK_PUSH:
4961 case OFPACT_STACK_POP:
4962 case OFPACT_LEARN:
4963 case OFPACT_WRITE_METADATA:
83a31283 4964 case OFPACT_GOTO_TABLE:
e672ff9b
JR
4965 case OFPACT_ENQUEUE:
4966 case OFPACT_SET_VLAN_VID:
4967 case OFPACT_SET_VLAN_PCP:
4968 case OFPACT_STRIP_VLAN:
4969 case OFPACT_PUSH_VLAN:
4970 case OFPACT_SET_ETH_SRC:
4971 case OFPACT_SET_ETH_DST:
4972 case OFPACT_SET_IPV4_SRC:
4973 case OFPACT_SET_IPV4_DST:
4974 case OFPACT_SET_IP_DSCP:
4975 case OFPACT_SET_IP_ECN:
4976 case OFPACT_SET_IP_TTL:
4977 case OFPACT_SET_L4_SRC_PORT:
4978 case OFPACT_SET_L4_DST_PORT:
4979 case OFPACT_SET_QUEUE:
4980 case OFPACT_POP_QUEUE:
4981 case OFPACT_PUSH_MPLS:
4982 case OFPACT_POP_MPLS:
4983 case OFPACT_SET_MPLS_LABEL:
4984 case OFPACT_SET_MPLS_TC:
4985 case OFPACT_SET_MPLS_TTL:
4986 case OFPACT_MULTIPATH:
4987 case OFPACT_BUNDLE:
4988 case OFPACT_EXIT:
4989 case OFPACT_UNROLL_XLATE:
4990 case OFPACT_FIN_TIMEOUT:
4991 case OFPACT_CLEAR_ACTIONS:
4992 case OFPACT_WRITE_ACTIONS:
4993 case OFPACT_METER:
4994 case OFPACT_SAMPLE:
7ae62a67 4995 case OFPACT_CLONE:
d4abaff5 4996 case OFPACT_DEBUG_RECIRC:
07659514 4997 case OFPACT_CT:
72fe7578 4998 case OFPACT_CT_CLEAR:
9ac0aada 4999 case OFPACT_NAT:
83a31283 5000 /* These may not generate PACKET INs. */
e672ff9b
JR
5001 break;
5002
e672ff9b
JR
5003 case OFPACT_NOTE:
5004 case OFPACT_CONJUNCTION:
83a31283 5005 /* These need not be copied for restoration. */
e672ff9b
JR
5006 continue;
5007 }
5008 /* Copy the action over. */
1d361a81 5009 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
e672ff9b
JR
5010 }
5011}
5012
8e53fe8c 5013static void
f2d105b5
JS
5014put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5015 struct flow_wildcards *wc)
8e53fe8c 5016{
2a754f4a
JS
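    /* OVS_CT_ATTR_MARK carries a 32-bit (value, mask) pair; only emit it when
     * the nested ct actions actually set some ct_mark bits. */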
5017 if (wc->masks.ct_mark) {
5018 struct {
5019 uint32_t key;
5020 uint32_t mask;
5021 } *odp_ct_mark;
5022
5023 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5024 sizeof(*odp_ct_mark));
5025 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5026 odp_ct_mark->mask = wc->masks.ct_mark;
8e53fe8c
JS
5027 }
5028}
5029
9daf2348 5030static void
f2d105b5
JS
5031put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5032 struct flow_wildcards *wc)
9daf2348 5033{
2ff8484b 5034 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
9daf2348
JS
5035 struct {
5036 ovs_u128 key;
5037 ovs_u128 mask;
5038 } *odp_ct_label;
5039
5040 odp_ct_label = nl_msg_put_unspec_uninit(odp_actions,
5041 OVS_CT_ATTR_LABELS,
5042 sizeof(*odp_ct_label));
f2d105b5 5043 odp_ct_label->key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
9daf2348
JS
5044 odp_ct_label->mask = wc->masks.ct_label;
5045 }
5046}
5047
d787ad39 5048static void
2d9b49dd
BP
5049put_ct_helper(struct xlate_ctx *ctx,
5050 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
d787ad39
JS
5051{
5052 if (ofc->alg) {
40c7b2fc
JS
5053 switch(ofc->alg) {
5054 case IPPORT_FTP:
d787ad39 5055 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
40c7b2fc
JS
5056 break;
5057 case IPPORT_TFTP:
5058 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5059 break;
5060 default:
2d9b49dd 5061 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
40c7b2fc 5062 break;
d787ad39
JS
5063 }
5064 }
5065}
5066
9ac0aada
JR
5067static void
5068put_ct_nat(struct xlate_ctx *ctx)
5069{
5070 struct ofpact_nat *ofn = ctx->ct_nat_action;
5071 size_t nat_offset;
5072
5073 if (!ofn) {
5074 return;
5075 }
5076
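    /* Serialize the OpenFlow NAT parameters into a nested OVS_CT_ATTR_NAT
     * attribute: direction and behavior flags, plus optional address and
     * port ranges. */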
5077 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5078 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5079 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5080 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5081 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5082 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5083 }
5084 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5085 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5086 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5087 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5088 }
5089 if (ofn->range_af == AF_INET) {
73e8bc23 5090 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
9ac0aada
JR
5091 ofn->range.addr.ipv4.min);
5092 if (ofn->range.addr.ipv4.max &&
73e8bc23
BP
5093 (ntohl(ofn->range.addr.ipv4.max)
5094 > ntohl(ofn->range.addr.ipv4.min))) {
5095 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5096 ofn->range.addr.ipv4.max);
9ac0aada
JR
5097 }
5098 } else if (ofn->range_af == AF_INET6) {
5099 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5100 &ofn->range.addr.ipv6.min,
5101 sizeof ofn->range.addr.ipv6.min);
5102 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
5103 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
5104 sizeof ofn->range.addr.ipv6.max) > 0) {
5105 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5106 &ofn->range.addr.ipv6.max,
5107 sizeof ofn->range.addr.ipv6.max);
5108 }
5109 }
5110 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
5111 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
5112 ofn->range.proto.min);
5113 if (ofn->range.proto.max &&
5114 ofn->range.proto.max > ofn->range.proto.min) {
5115 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
5116 ofn->range.proto.max);
5117 }
5118 }
5119 }
5120 nl_msg_end_nested(ctx->odp_actions, nat_offset);
5121}
5122
07659514
JS
5123static void
5124compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc)
5125{
9daf2348 5126 ovs_u128 old_ct_label = ctx->base_flow.ct_label;
f2d105b5 5127 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
8e53fe8c 5128 uint32_t old_ct_mark = ctx->base_flow.ct_mark;
f2d105b5 5129 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
07659514
JS
5130 size_t ct_offset;
5131 uint16_t zone;
5132
5133 /* Ensure that any prior actions are applied before composing the new
5134 * conntrack action. */
5135 xlate_commit_actions(ctx);
5136
8e53fe8c 5137 /* Process nested actions first, to populate the key. */
9ac0aada 5138 ctx->ct_nat_action = NULL;
f2d105b5
JS
5139 ctx->wc->masks.ct_mark = 0;
5140 ctx->wc->masks.ct_label.u64.hi = ctx->wc->masks.ct_label.u64.lo = 0;
8e53fe8c
JS
5141 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx);
5142
07659514
JS
5143 if (ofc->zone_src.field) {
5144 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
5145 } else {
5146 zone = ofc->zone_imm;
5147 }
5148
5149 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
5150 if (ofc->flags & NX_CT_F_COMMIT) {
5151 nl_msg_put_flag(ctx->odp_actions, OVS_CT_ATTR_COMMIT);
5152 }
5153 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
f2d105b5
JS
5154 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5155 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
2d9b49dd 5156 put_ct_helper(ctx, ctx->odp_actions, ofc);
9ac0aada
JR
5157 put_ct_nat(ctx);
5158 ctx->ct_nat_action = NULL;
07659514
JS
5159 nl_msg_end_nested(ctx->odp_actions, ct_offset);
5160
8e53fe8c
JS
5161 /* Restore the original ct fields in the key. These should only be exposed
5162 * after recirculation to another table. */
5163 ctx->base_flow.ct_mark = old_ct_mark;
f2d105b5 5164 ctx->wc->masks.ct_mark = old_ct_mark_mask;
9daf2348 5165 ctx->base_flow.ct_label = old_ct_label;
f2d105b5 5166 ctx->wc->masks.ct_label = old_ct_label_mask;
8e53fe8c 5167
07659514
JS
5168 if (ofc->recirc_table == NX_CT_RECIRC_NONE) {
5169 /* If we do not recirculate as part of this action, hide the results of
5170 * connection tracking from subsequent recirculations. */
5171 ctx->conntracked = false;
5172 } else {
5173 /* Use ct_* fields from datapath during recirculation upcall. */
5174 ctx->conntracked = true;
e37b8437 5175 compose_recirculate_and_fork(ctx, ofc->recirc_table);
07659514
JS
5176 }
5177}
5178
e12ec36b
SH
5179static void
5180recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
5181{
5182 /* No need to recirculate if already exiting. */
5183 if (ctx->exit) {
5184 return;
5185 }
5186
5187 /* Do not consider recirculating unless the packet was previously MPLS. */
5188 if (!ctx->was_mpls) {
5189 return;
5190 }
5191
5192 /* Special case these actions, only recirculating if necessary.
5193 * This avoids the overhead of recirculation in common use-cases.
5194 */
5195 switch (a->type) {
5196
5197 /* Output actions do not require recirculation. */
5198 case OFPACT_OUTPUT:
aaca4fe0 5199 case OFPACT_OUTPUT_TRUNC:
e12ec36b
SH
5200 case OFPACT_ENQUEUE:
5201 case OFPACT_OUTPUT_REG:
5202 /* Set actions that don't touch L3+ fields do not require recirculation. */
5203 case OFPACT_SET_VLAN_VID:
5204 case OFPACT_SET_VLAN_PCP:
5205 case OFPACT_SET_ETH_SRC:
5206 case OFPACT_SET_ETH_DST:
5207 case OFPACT_SET_TUNNEL:
5208 case OFPACT_SET_QUEUE:
 5209 /* If the actions of a group require recirculation, that can be detected
 5210 * when translating them. */
5211 case OFPACT_GROUP:
5212 return;
5213
 5214 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
5215 case OFPACT_SET_FIELD:
5216 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
5217 break;
5218 }
5219 return;
5220
5221 /* For simplicity, recirculate in all other cases. */
5222 case OFPACT_CONTROLLER:
5223 case OFPACT_BUNDLE:
5224 case OFPACT_STRIP_VLAN:
5225 case OFPACT_PUSH_VLAN:
5226 case OFPACT_SET_IPV4_SRC:
5227 case OFPACT_SET_IPV4_DST:
5228 case OFPACT_SET_IP_DSCP:
5229 case OFPACT_SET_IP_ECN:
5230 case OFPACT_SET_IP_TTL:
5231 case OFPACT_SET_L4_SRC_PORT:
5232 case OFPACT_SET_L4_DST_PORT:
5233 case OFPACT_REG_MOVE:
5234 case OFPACT_STACK_PUSH:
5235 case OFPACT_STACK_POP:
5236 case OFPACT_DEC_TTL:
5237 case OFPACT_SET_MPLS_LABEL:
5238 case OFPACT_SET_MPLS_TC:
5239 case OFPACT_SET_MPLS_TTL:
5240 case OFPACT_DEC_MPLS_TTL:
5241 case OFPACT_PUSH_MPLS:
5242 case OFPACT_POP_MPLS:
5243 case OFPACT_POP_QUEUE:
5244 case OFPACT_FIN_TIMEOUT:
5245 case OFPACT_RESUBMIT:
5246 case OFPACT_LEARN:
5247 case OFPACT_CONJUNCTION:
5248 case OFPACT_MULTIPATH:
5249 case OFPACT_NOTE:
5250 case OFPACT_EXIT:
5251 case OFPACT_SAMPLE:
7ae62a67 5252 case OFPACT_CLONE:
e12ec36b
SH
5253 case OFPACT_UNROLL_XLATE:
5254 case OFPACT_CT:
72fe7578 5255 case OFPACT_CT_CLEAR:
e12ec36b
SH
5256 case OFPACT_NAT:
5257 case OFPACT_DEBUG_RECIRC:
5258 case OFPACT_METER:
5259 case OFPACT_CLEAR_ACTIONS:
5260 case OFPACT_WRITE_ACTIONS:
5261 case OFPACT_WRITE_METADATA:
5262 case OFPACT_GOTO_TABLE:
5263 default:
5264 break;
5265 }
5266
5267 /* Recirculate */
5268 ctx_trigger_freeze(ctx);
5269}
5270
2d9b49dd
BP
5271static void
5272xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
5273{
5274 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
5275 xlate_report_subfield(ctx, &a->dst);
5276}
5277
5278static void
5279xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
5280{
5281 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
5282 xlate_report_subfield(ctx, &a->subfield);
5283 } else {
5284 xlate_report_error(ctx, "stack underflow");
5285 }
5286}
5287
5288/* Restore translation context data that was stored earlier. */
5289static void
5290xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
5291 const struct ofpact_unroll_xlate *a)
5292{
5293 ctx->table_id = a->rule_table_id;
5294 ctx->rule_cookie = a->rule_cookie;
5295 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
5296 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
5297}
5298
9583bc14
EJ
5299static void
5300do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
5301 struct xlate_ctx *ctx)
5302{
49a73e0c 5303 struct flow_wildcards *wc = ctx->wc;
33bf9176 5304 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
5305 const struct ofpact *a;
5306
a36de779 5307 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
53902038 5308 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
a36de779 5309 }
f47ea021
JR
5310 /* dl_type already in the mask, not set below. */
5311
2d9b49dd
BP
5312 if (!ofpacts_len) {
5313 xlate_report(ctx, OFT_ACTION, "drop");
5314 return;
5315 }
5316
9583bc14
EJ
5317 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5318 struct ofpact_controller *controller;
5319 const struct ofpact_metadata *metadata;
b2dd70be
JR
5320 const struct ofpact_set_field *set_field;
5321 const struct mf_field *mf;
9583bc14 5322
fff1b9c0
JR
5323 if (ctx->error) {
5324 break;
5325 }
5326
e12ec36b
SH
5327 recirc_for_mpls(a, ctx);
5328
e672ff9b
JR
5329 if (ctx->exit) {
 5330 /* Check whether we need to store the remaining actions for later
5331 * execution. */
1d361a81
BP
5332 if (ctx->freezing) {
5333 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
e672ff9b
JR
5334 ctx);
5335 }
5336 break;
7bbdd84f
SH
5337 }
5338
2d9b49dd
BP
5339 if (OVS_UNLIKELY(ctx->xin->trace)) {
5340 struct ds s = DS_EMPTY_INITIALIZER;
5341 ofpacts_format(a, OFPACT_ALIGN(a->len), &s);
5342 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
5343 ds_destroy(&s);
5344 }
5345
9583bc14
EJ
5346 switch (a->type) {
5347 case OFPACT_OUTPUT:
5348 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
5349 ofpact_get_OUTPUT(a)->max_len, true);
5350 break;
5351
7395c052 5352 case OFPACT_GROUP:
f4fb341b 5353 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
1d741d6d 5354 /* Group could not be found. */
db88b35c
JR
5355
5356 /* XXX: Terminates action list translation, but does not
5357 * terminate the pipeline. */
f4fb341b
SH
5358 return;
5359 }
7395c052
NZ
5360 break;
5361
9583bc14
EJ
5362 case OFPACT_CONTROLLER:
5363 controller = ofpact_get_CONTROLLER(a);
77ab5fd2
BP
5364 if (controller->pause) {
5365 ctx->pause = controller;
5366 ctx->xout->slow |= SLOW_CONTROLLER;
5367 ctx_trigger_freeze(ctx);
5368 a = ofpact_next(a);
5369 } else {
5370 execute_controller_action(ctx, controller->max_len,
5371 controller->reason,
5372 controller->controller_id,
5373 controller->userdata,
5374 controller->userdata_len);
5375 }
9583bc14
EJ
5376 break;
5377
5378 case OFPACT_ENQUEUE:
16194afd
DDP
5379 memset(&wc->masks.skb_priority, 0xff,
5380 sizeof wc->masks.skb_priority);
9583bc14
EJ
5381 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
5382 break;
5383
5384 case OFPACT_SET_VLAN_VID:
f74e7df7 5385 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
ca287d20
JR
5386 if (flow->vlan_tci & htons(VLAN_CFI) ||
5387 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
5388 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
5389 flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
5390 | htons(VLAN_CFI));
5391 }
9583bc14
EJ
5392 break;
5393
5394 case OFPACT_SET_VLAN_PCP:
f74e7df7 5395 wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
ca287d20
JR
5396 if (flow->vlan_tci & htons(VLAN_CFI) ||
5397 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
5398 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
5399 flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
5400 << VLAN_PCP_SHIFT) | VLAN_CFI);
5401 }
9583bc14
EJ
5402 break;
5403
5404 case OFPACT_STRIP_VLAN:
f74e7df7 5405 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 5406 flow->vlan_tci = htons(0);
9583bc14
EJ
5407 break;
5408
5409 case OFPACT_PUSH_VLAN:
5410 /* XXX 802.1AD(QinQ) */
f74e7df7 5411 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 5412 flow->vlan_tci = htons(VLAN_CFI);
9583bc14
EJ
5413 break;
5414
5415 case OFPACT_SET_ETH_SRC:
74ff3298
JR
5416 WC_MASK_FIELD(wc, dl_src);
5417 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
9583bc14
EJ
5418 break;
5419
5420 case OFPACT_SET_ETH_DST:
74ff3298
JR
5421 WC_MASK_FIELD(wc, dl_dst);
5422 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
9583bc14
EJ
5423 break;
5424
5425 case OFPACT_SET_IPV4_SRC:
33bf9176 5426 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 5427 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 5428 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
5429 }
5430 break;
5431
5432 case OFPACT_SET_IPV4_DST:
33bf9176 5433 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 5434 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 5435 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
5436 }
5437 break;
5438
04f01c24
BP
5439 case OFPACT_SET_IP_DSCP:
5440 if (is_ip_any(flow)) {
f47ea021 5441 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 5442 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 5443 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
9583bc14
EJ
5444 }
5445 break;
5446
ff14eb7a
JR
5447 case OFPACT_SET_IP_ECN:
5448 if (is_ip_any(flow)) {
5449 wc->masks.nw_tos |= IP_ECN_MASK;
5450 flow->nw_tos &= ~IP_ECN_MASK;
5451 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
5452 }
5453 break;
5454
0c20dbe4
JR
5455 case OFPACT_SET_IP_TTL:
5456 if (is_ip_any(flow)) {
5457 wc->masks.nw_ttl = 0xff;
5458 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
5459 }
5460 break;
5461
9583bc14 5462 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 5463 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
5464 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
5465 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 5466 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
5467 }
5468 break;
5469
5470 case OFPACT_SET_L4_DST_PORT:
b8778a0d 5471 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
5472 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
5473 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 5474 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
5475 }
5476 break;
5477
5478 case OFPACT_RESUBMIT:
8bf009bf
JR
5479 /* Freezing complicates resubmit. Some action in the flow
5480 * entry found by resubmit might trigger freezing. If that
 5481 * happens, then we do not want to execute the resubmit again
 5482 * during thawing, so we want to skip back to the head of the loop
5483 * to avoid that, only adding any actions that follow the resubmit
5484 * to the frozen actions.
6b1c5734 5485 */
9583bc14 5486 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
6b1c5734 5487 continue;
9583bc14
EJ
5488
5489 case OFPACT_SET_TUNNEL:
33bf9176 5490 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
5491 break;
5492
5493 case OFPACT_SET_QUEUE:
16194afd
DDP
5494 memset(&wc->masks.skb_priority, 0xff,
5495 sizeof wc->masks.skb_priority);
9583bc14
EJ
5496 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
5497 break;
5498
5499 case OFPACT_POP_QUEUE:
16194afd
DDP
5500 memset(&wc->masks.skb_priority, 0xff,
5501 sizeof wc->masks.skb_priority);
2d9b49dd
BP
5502 if (flow->skb_priority != ctx->orig_skb_priority) {
5503 flow->skb_priority = ctx->orig_skb_priority;
5504 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
5505 flow->skb_priority);
5506 }
9583bc14
EJ
5507 break;
5508
5509 case OFPACT_REG_MOVE:
2d9b49dd 5510 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
9583bc14
EJ
5511 break;
5512
b2dd70be
JR
5513 case OFPACT_SET_FIELD:
5514 set_field = ofpact_get_SET_FIELD(a);
5515 mf = set_field->field;
b2dd70be 5516
aff49b8c
JR
5517 /* Set the field only if the packet actually has it. */
5518 if (mf_are_prereqs_ok(mf, flow, wc)) {
128684a6
JR
5519 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
5520 mf_set_flow_value_masked(mf, set_field->value,
5521 ofpact_set_field_mask(set_field),
5522 flow);
2d9b49dd
BP
5523 } else {
5524 xlate_report(ctx, OFT_WARN,
5525 "unmet prerequisites for %s, set_field ignored",
5526 mf->name);
5527
b8778a0d 5528 }
b2dd70be
JR
5529 break;
5530
9583bc14 5531 case OFPACT_STACK_PUSH:
33bf9176
BP
5532 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
5533 &ctx->stack);
9583bc14
EJ
5534 break;
5535
5536 case OFPACT_STACK_POP:
2d9b49dd 5537 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
9583bc14
EJ
5538 break;
5539
5540 case OFPACT_PUSH_MPLS:
8bfd0fda 5541 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
9583bc14
EJ
5542 break;
5543
5544 case OFPACT_POP_MPLS:
8bfd0fda 5545 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
9583bc14
EJ
5546 break;
5547
097d4939 5548 case OFPACT_SET_MPLS_LABEL:
8bfd0fda
BP
5549 compose_set_mpls_label_action(
5550 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
1d741d6d 5551 break;
097d4939
JR
5552
5553 case OFPACT_SET_MPLS_TC:
8bfd0fda 5554 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
097d4939
JR
5555 break;
5556
9583bc14 5557 case OFPACT_SET_MPLS_TTL:
8bfd0fda 5558 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
9583bc14
EJ
5559 break;
5560
5561 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 5562 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 5563 return;
9583bc14
EJ
5564 }
5565 break;
5566
5567 case OFPACT_DEC_TTL:
f74e7df7 5568 wc->masks.nw_ttl = 0xff;
9583bc14 5569 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 5570 return;
9583bc14
EJ
5571 }
5572 break;
5573
5574 case OFPACT_NOTE:
5575 /* Nothing to do. */
5576 break;
5577
5578 case OFPACT_MULTIPATH:
33bf9176 5579 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
2d9b49dd 5580 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
9583bc14
EJ
5581 break;
5582
5583 case OFPACT_BUNDLE:
9583bc14
EJ
5584 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
5585 break;
5586
5587 case OFPACT_OUTPUT_REG:
5588 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
5589 break;
5590
aaca4fe0
WT
5591 case OFPACT_OUTPUT_TRUNC:
5592 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
5593 ofpact_get_OUTPUT_TRUNC(a)->max_len);
5594 break;
5595
9583bc14
EJ
5596 case OFPACT_LEARN:
5597 xlate_learn_action(ctx, ofpact_get_LEARN(a));
5598 break;
5599
2d9b49dd 5600 case OFPACT_CONJUNCTION:
afc3987b
BP
5601 /* A flow with a "conjunction" action represents part of a special
5602 * kind of "set membership match". Such a flow should not actually
5603 * get executed, but it could via, say, a "packet-out", even though
5604 * that wouldn't be useful. Log it to help debugging. */
2d9b49dd 5605 xlate_report_error(ctx, "executing no-op conjunction action");
18080541
BP
5606 break;
5607
9583bc14
EJ
5608 case OFPACT_EXIT:
5609 ctx->exit = true;
5610 break;
5611
2d9b49dd
BP
5612 case OFPACT_UNROLL_XLATE:
5613 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
e672ff9b 5614 break;
2d9b49dd 5615
9583bc14 5616 case OFPACT_FIN_TIMEOUT:
33bf9176 5617 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
9583bc14
EJ
5618 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
5619 break;
5620
5621 case OFPACT_CLEAR_ACTIONS:
2d9b49dd 5622 xlate_report_action_set(ctx, "was");
7fdb60a7 5623 ofpbuf_clear(&ctx->action_set);
c61f3870
BP
5624 ctx->xin->flow.actset_output = OFPP_UNSET;
5625 ctx->action_set_has_group = false;
7fdb60a7
SH
5626 break;
5627
5628 case OFPACT_WRITE_ACTIONS:
7e7e8dbb 5629 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
2d9b49dd 5630 xlate_report_action_set(ctx, "is");
9583bc14
EJ
5631 break;
5632
5633 case OFPACT_WRITE_METADATA:
5634 metadata = ofpact_get_WRITE_METADATA(a);
33bf9176
BP
5635 flow->metadata &= ~metadata->mask;
5636 flow->metadata |= metadata->metadata & metadata->mask;
9583bc14
EJ
5637 break;
5638
638a19b0 5639 case OFPACT_METER:
076caa2f 5640 xlate_meter_action(ctx, ofpact_get_METER(a));
638a19b0
JR
5641 break;
5642
9583bc14 5643 case OFPACT_GOTO_TABLE: {
9583bc14 5644 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14 5645
9167fc1a
JR
5646 ovs_assert(ctx->table_id < ogt->table_id);
5647
4468099e 5648 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
2cd20955 5649 ogt->table_id, true, true, false);
9583bc14
EJ
5650 break;
5651 }
5652
5653 case OFPACT_SAMPLE:
5654 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
5655 break;
d4abaff5 5656
7ae62a67 5657 case OFPACT_CLONE:
bef503e8 5658 xlate_clone(ctx, ofpact_get_CLONE(a));
7ae62a67
WT
5659 break;
5660
07659514 5661 case OFPACT_CT:
07659514
JS
5662 compose_conntrack_action(ctx, ofpact_get_CT(a));
5663 break;
5664
72fe7578
BP
5665 case OFPACT_CT_CLEAR:
5666 clear_conntrack(ctx);
5667 break;
5668
9ac0aada
JR
5669 case OFPACT_NAT:
5670 /* This will be processed by compose_conntrack_action(). */
5671 ctx->ct_nat_action = ofpact_get_NAT(a);
5672 break;
5673
d4abaff5 5674 case OFPACT_DEBUG_RECIRC:
1d361a81 5675 ctx_trigger_freeze(ctx);
d4abaff5
BP
5676 a = ofpact_next(a);
5677 break;
9583bc14 5678 }
1d741d6d
JR
5679
 5680 /* Check whether we need to store this and the remaining actions for later
5681 * execution. */
1d361a81
BP
5682 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
5683 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
1d741d6d
JR
5684 break;
5685 }
9583bc14 5686 }
9583bc14
EJ
5687}
5688
5689void
5690xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
1f4a8933
JR
5691 ovs_version_t version, const struct flow *flow,
5692 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
1520ef4f
BP
5693 const struct dp_packet *packet, struct flow_wildcards *wc,
5694 struct ofpbuf *odp_actions)
9583bc14
EJ
5695{
5696 xin->ofproto = ofproto;
1f4a8933 5697 xin->tables_version = version;
9583bc14 5698 xin->flow = *flow;
8d8ab6c2 5699 xin->upcall_flow = flow;
cc377352 5700 xin->flow.in_port.ofp_port = in_port;
c61f3870 5701 xin->flow.actset_output = OFPP_UNSET;
9583bc14 5702 xin->packet = packet;
df70a773 5703 xin->allow_side_effects = packet != NULL;
9583bc14 5704 xin->rule = rule;
b256dc52 5705 xin->xcache = NULL;
9583bc14
EJ
5706 xin->ofpacts = NULL;
5707 xin->ofpacts_len = 0;
5708 xin->tcp_flags = tcp_flags;
2d9b49dd 5709 xin->trace = NULL;
9583bc14 5710 xin->resubmit_stats = NULL;
790c5d26 5711 xin->depth = 0;
cdd42eda 5712 xin->resubmits = 0;
49a73e0c 5713 xin->wc = wc;
1520ef4f 5714 xin->odp_actions = odp_actions;
e672ff9b
JR
5715
5716 /* Do recirc lookup. */
1d361a81 5717 xin->frozen_state = NULL;
29b1ea3f
BP
5718 if (flow->recirc_id) {
5719 const struct recirc_id_node *node
5720 = recirc_id_node_find(flow->recirc_id);
5721 if (node) {
1d361a81 5722 xin->frozen_state = &node->state;
29b1ea3f
BP
5723 }
5724 }
9583bc14
EJ
5725}
5726
5727void
5728xlate_out_uninit(struct xlate_out *xout)
5729{
e672ff9b 5730 if (xout) {
fbf5d6ec 5731 recirc_refs_unref(&xout->recircs);
9583bc14
EJ
5732 }
5733}
9583bc14 5734\f
55954f6e
EJ
5735static struct skb_priority_to_dscp *
5736get_skb_priority(const struct xport *xport, uint32_t skb_priority)
5737{
5738 struct skb_priority_to_dscp *pdscp;
5739 uint32_t hash;
5740
5741 hash = hash_int(skb_priority, 0);
5742 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
5743 if (pdscp->skb_priority == skb_priority) {
5744 return pdscp;
5745 }
5746 }
5747 return NULL;
5748}
5749
5750static bool
5751dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
5752 uint8_t *dscp)
5753{
5754 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
5755 *dscp = pdscp ? pdscp->dscp : 0;
5756 return pdscp != NULL;
5757}
5758
16194afd
DDP
5759static size_t
5760count_skb_priorities(const struct xport *xport)
5761{
5762 return hmap_count(&xport->skb_priorities);
5763}
5764
55954f6e
EJ
5765static void
5766clear_skb_priorities(struct xport *xport)
5767{
4ec3d7c7 5768 struct skb_priority_to_dscp *pdscp;
55954f6e 5769
4ec3d7c7 5770 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
55954f6e
EJ
5771 free(pdscp);
5772 }
5773}
5774
ce4a6b76
BP
5775static bool
5776actions_output_to_local_port(const struct xlate_ctx *ctx)
5777{
46c88433 5778 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
ce4a6b76
BP
5779 const struct nlattr *a;
5780 unsigned int left;
5781
1520ef4f
BP
5782 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
5783 ctx->odp_actions->size) {
ce4a6b76
BP
5784 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
5785 && nl_attr_get_odp_port(a) == local_odp_port) {
5786 return true;
5787 }
5788 }
5789 return false;
5790}
9583bc14 5791
5e2a6702 5792#if defined(__linux__)
7d031d7e
BP
5793/* Returns the maximum number of packets that the Linux kernel is willing to
5794 * queue up internally to certain kinds of software-implemented ports, or the
5795 * default (and rarely modified) value if it cannot be determined. */
5796static int
5797netdev_max_backlog(void)
5798{
5799 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
5800 static int max_backlog = 1000; /* The normal default value. */
5801
5802 if (ovsthread_once_start(&once)) {
5803 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
5804 FILE *stream;
5805 int n;
5806
5807 stream = fopen(filename, "r");
5808 if (!stream) {
120c348f 5809 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7d031d7e
BP
5810 } else {
5811 if (fscanf(stream, "%d", &n) != 1) {
5812 VLOG_WARN("%s: read error", filename);
5813 } else if (n <= 100) {
5814 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
5815 } else {
5816 max_backlog = n;
5817 }
5818 fclose(stream);
5819 }
5820 ovsthread_once_done(&once);
5821
5822 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
5823 }
5824
5825 return max_backlog;
5826}
5827
5828/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
5829 * 'odp_actions'. */
5830static int
5831count_output_actions(const struct ofpbuf *odp_actions)
5832{
5833 const struct nlattr *a;
5834 size_t left;
5835 int n = 0;
5836
6fd6ed71 5837 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7d031d7e
BP
5838 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
5839 n++;
5840 }
5841 }
5842 return n;
5843}
5e2a6702 5844#endif /* defined(__linux__) */
7d031d7e
BP
5845
5846/* Returns true if 'odp_actions' contains more output actions than the datapath
5847 * can reliably handle in one go. On Linux, this is the value of the
5848 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
5849 * packets that the kernel is willing to queue up for processing while the
5850 * datapath is processing a set of actions. */
5851static bool
5e2a6702 5852too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7d031d7e
BP
5853{
5854#ifdef __linux__
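    /* Each output action occupies NL_A_U32_SIZE bytes, so size / NL_A_U32_SIZE
     * is an upper bound on the number of output actions; the exact count is
     * only computed when that bound exceeds the limit. */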
6fd6ed71 5855 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7d031d7e
BP
5856 && count_output_actions(odp_actions) > netdev_max_backlog());
5857#else
5858 /* OSes other than Linux might have similar limits, but we don't know how
 5859 * to determine them. */
5860 return false;
5861#endif
5862}
5863
234c3da9
BP
5864static void
5865xlate_wc_init(struct xlate_ctx *ctx)
5866{
5867 flow_wildcards_init_catchall(ctx->wc);
5868
5869 /* Some fields we consider to always be examined. */
5e2e998a
JR
5870 WC_MASK_FIELD(ctx->wc, in_port);
5871 WC_MASK_FIELD(ctx->wc, dl_type);
234c3da9 5872 if (is_ip_any(&ctx->xin->flow)) {
5e2e998a 5873 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
234c3da9
BP
5874 }
5875
5876 if (ctx->xbridge->support.odp.recirc) {
5877 /* Always exactly match recirc_id when datapath supports
5878 * recirculation. */
5e2e998a 5879 WC_MASK_FIELD(ctx->wc, recirc_id);
234c3da9
BP
5880 }
5881
5882 if (ctx->xbridge->netflow) {
5883 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
5884 }
5885
5886 tnl_wc_init(&ctx->xin->flow, ctx->wc);
5887}
5888
5889static void
5890xlate_wc_finish(struct xlate_ctx *ctx)
5891{
5892 /* Clear the metadata and register wildcard masks, because we won't
5893 * use non-header fields as part of the cache. */
5894 flow_wildcards_clear_non_packet_fields(ctx->wc);
5895
5896 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
5897 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
5898 * represent these fields. The datapath interface, on the other hand,
5899 * represents them with just 8 bits each. This means that if the high
5900 * 8 bits of the masks for these fields somehow become set, then they
5901 * will get chopped off by a round trip through the datapath, and
5902 * revalidation will spot that as an inconsistency and delete the flow.
5903 * Avoid the problem here by making sure that only the low 8 bits of
5904 * either field can be unwildcarded for ICMP.
5905 */
a75636c8 5906 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
234c3da9
BP
5907 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
5908 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
5909 }
5910 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
5911 if (ctx->wc->masks.vlan_tci) {
5912 ctx->wc->masks.vlan_tci |= htons(VLAN_CFI);
5913 }
4a7ab326
DDP
5914
5915 /* The classifier might return masks that match on tp_src and tp_dst even
5916 * for later fragments. This happens because there might be flows that
5917 * match on tp_src or tp_dst without matching on the frag bits, because
5918 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
5919 * datapath flows and since tp_src and tp_dst are always going to be 0,
5920 * wildcard the fields here. */
5921 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
5922 ctx->wc->masks.tp_src = 0;
5923 ctx->wc->masks.tp_dst = 0;
5924 }
234c3da9
BP
5925}
5926
e672ff9b
JR
5927/* Translates the flow, actions, or rule in 'xin' into datapath actions in
5928 * 'xout'.
56450a41 5929 * The caller must take responsibility for eventually freeing 'xout', with
fff1b9c0
JR
5930 * xlate_out_uninit().
5931 * Returns 'XLATE_OK' if translation was successful. In case of an error an
5932 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
5933 * so that most callers may ignore the return value and transparently install a
5934 * drop flow when the translation fails. */
5935enum xlate_error
84f0f298 5936xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
9583bc14 5937{
e467ea42
BP
5938 *xout = (struct xlate_out) {
5939 .slow = 0,
fbf5d6ec 5940 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
e467ea42
BP
5941 };
5942
84f0f298 5943 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
bb00fdef
BP
5944 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
5945 if (!xbridge) {
fff1b9c0 5946 return XLATE_BRIDGE_NOT_FOUND;
bb00fdef
BP
5947 }
5948
33bf9176
BP
5949 struct flow *flow = &xin->flow;
5950
84cf3c1f 5951 uint8_t stack_stub[1024];
bb00fdef 5952 uint64_t action_set_stub[1024 / 8];
1d361a81 5953 uint64_t frozen_actions_stub[1024 / 8];
1520ef4f
BP
5954 uint64_t actions_stub[256 / 8];
5955 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
bb00fdef
BP
5956 struct xlate_ctx ctx = {
5957 .xin = xin,
5958 .xout = xout,
5959 .base_flow = *flow,
c2b878e0 5960 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
bb00fdef
BP
5961 .xbridge = xbridge,
5962 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
5963 .rule = xin->rule,
c0e638aa
BP
5964 .wc = (xin->wc
5965 ? xin->wc
f36efd90 5966 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
1520ef4f 5967 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
bb00fdef 5968
790c5d26 5969 .depth = xin->depth,
cdd42eda 5970 .resubmits = xin->resubmits,
bb00fdef
BP
5971 .in_group = false,
5972 .in_action_set = false,
5973
5974 .table_id = 0,
5975 .rule_cookie = OVS_BE64_MAX,
5976 .orig_skb_priority = flow->skb_priority,
5977 .sflow_n_outputs = 0,
5978 .sflow_odp_port = 0,
2031ef97 5979 .nf_output_iface = NF_OUT_DROP,
bb00fdef 5980 .exit = false,
fff1b9c0 5981 .error = XLATE_OK,
3d6151f3 5982 .mirrors = 0,
bb00fdef 5983
1d361a81 5984 .freezing = false,
53cc166a 5985 .recirc_update_dp_hash = false,
1d361a81 5986 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
77ab5fd2 5987 .pause = NULL,
bb00fdef 5988
e12ec36b 5989 .was_mpls = false,
07659514 5990 .conntracked = false,
bb00fdef 5991
9ac0aada
JR
5992 .ct_nat_action = NULL,
5993
bb00fdef
BP
5994 .action_set_has_group = false,
5995 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
5996 };
865ca6cf
BP
5997
5998 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
42deb67d
PS
5999 * the packet as the datapath will treat it for output actions. Our
6000 * datapath doesn't retain tunneling information without us re-setting
6001 * it, so clear the tunnel data.
865ca6cf 6002 */
42deb67d 6003
bb00fdef 6004 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
865ca6cf 6005
1520ef4f 6006 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
c0e638aa 6007 xlate_wc_init(&ctx);
bb00fdef 6008
46c88433 6009 COVERAGE_INC(xlate_actions);
9583bc14 6010
2d9b49dd
BP
6011 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
6012 xbridge->name);
1d361a81
BP
6013 if (xin->frozen_state) {
6014 const struct frozen_state *state = xin->frozen_state;
e672ff9b 6015
2d9b49dd
BP
6016 struct ovs_list *old_trace = xin->trace;
6017 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
d6bef3cc 6018
e672ff9b 6019 if (xin->ofpacts_len > 0 || ctx.rule) {
2d9b49dd
BP
6020 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
6021 xin->ofpacts_len ? "actions" : "rule");
fff1b9c0 6022 ctx.error = XLATE_RECIRCULATION_CONFLICT;
1520ef4f 6023 goto exit;
e672ff9b
JR
6024 }
6025
6026 /* Set the bridge for post-recirculation processing if needed. */
07a3cd5c 6027 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
e672ff9b 6028 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2082425c 6029 const struct xbridge *new_bridge
290835f9 6030 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
e672ff9b
JR
6031
6032 if (OVS_UNLIKELY(!new_bridge)) {
6033 /* Drop the packet if the bridge cannot be found. */
2d9b49dd 6034 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
fff1b9c0 6035 ctx.error = XLATE_BRIDGE_NOT_FOUND;
2d9b49dd 6036 xin->trace = old_trace;
1520ef4f 6037 goto exit;
e672ff9b
JR
6038 }
6039 ctx.xbridge = new_bridge;
1f4a8933
JR
6040 /* The bridge is now known so obtain its table version. */
6041 ctx.xin->tables_version
6042 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
e672ff9b
JR
6043 }
6044
1d361a81
BP
6045 /* Set the thawed table id. Note: A table lookup is done only if there
6046 * are no frozen actions. */
2082425c 6047 ctx.table_id = state->table_id;
2d9b49dd
BP
6048 xlate_report(&ctx, OFT_THAW,
6049 "Resuming from table %"PRIu8, ctx.table_id);
e672ff9b 6050
07659514 6051 if (!state->conntracked) {
72fe7578 6052 clear_conntrack(&ctx);
07659514
JS
6053 }
6054
e672ff9b 6055 /* Restore pipeline metadata. May change flow's in_port and other
1d361a81
BP
6056 * metadata to the values that existed when freezing was triggered. */
6057 frozen_metadata_to_flow(&state->metadata, flow);
e672ff9b
JR
6058
6059 /* Restore stack, if any. */
2082425c 6060 if (state->stack) {
84cf3c1f 6061 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
e672ff9b
JR
6062 }
6063
29bae541
BP
6064 /* Restore mirror state. */
6065 ctx.mirrors = state->mirrors;
6066
e672ff9b 6067 /* Restore action set, if any. */
2082425c 6068 if (state->action_set_len) {
2d9b49dd 6069 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
417509fa 6070 state->action_set, state->action_set_len);
d6bef3cc 6071
7e7e8dbb
BP
6072 flow->actset_output = OFPP_UNSET;
6073 xlate_write_actions__(&ctx, state->action_set,
6074 state->action_set_len);
e672ff9b
JR
6075 }
6076
1d361a81
BP
6077 /* Restore frozen actions. If there are no actions, processing will
6078 * start with a lookup in the table set above. */
417509fa
BP
6079 xin->ofpacts = state->ofpacts;
6080 xin->ofpacts_len = state->ofpacts_len;
6081 if (state->ofpacts_len) {
2d9b49dd 6082 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
d6bef3cc 6083 xin->ofpacts, xin->ofpacts_len);
e672ff9b 6084 }
e672ff9b 6085
2d9b49dd
BP
6086 xin->trace = old_trace;
6087 } else if (OVS_UNLIKELY(flow->recirc_id)) {
6088 xlate_report_error(&ctx,
6089 "Recirculation context not found for ID %"PRIx32,
6090 flow->recirc_id);
fff1b9c0 6091 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
1520ef4f 6092 goto exit;
e672ff9b 6093 }
9583bc14 6094
8d8ab6c2
JG
6095 /* Tunnel metadata in udpif format must be normalized before translation. */
6096 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
5b09d9f7
MS
6097 const struct tun_table *tun_tab = ofproto_get_tun_tab(
6098 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
6099 int err;
6100
6101 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
6102 &xin->upcall_flow->tunnel,
6103 &flow->tunnel);
6104 if (err) {
2d9b49dd 6105 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
8d8ab6c2
JG
6106 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
6107 goto exit;
6108 }
6109 } else if (!flow->tunnel.metadata.tab) {
6110 /* If the original flow did not come in on a tunnel, then it won't have
6111 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
6112 * table in case we generate tunnel actions. */
5b09d9f7
MS
6113 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
6114 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
6115 }
6116 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
6117
10c44245 6118 if (!xin->ofpacts && !ctx.rule) {
b2e89cc9 6119 ctx.rule = rule_dpif_lookup_from_table(
1f4a8933 6120 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
1e1e1d19 6121 ctx.xin->resubmit_stats, &ctx.table_id,
a027899e 6122 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
10c44245 6123 if (ctx.xin->resubmit_stats) {
b2e89cc9 6124 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
10c44245 6125 }
b256dc52
JS
6126 if (ctx.xin->xcache) {
6127 struct xc_entry *entry;
6128
6129 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
901a517e 6130 entry->rule = ctx.rule;
07a3cd5c 6131 ofproto_rule_ref(&ctx.rule->up);
b256dc52 6132 }
a8c31348 6133
2d9b49dd 6134 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
10c44245 6135 }
10c44245 6136
1d361a81 6137 /* Get the proximate input port of the packet. (If xin->frozen_state,
14d2b8b2
BP
6138 * flow->in_port is the ultimate input port of the packet.) */
6139 struct xport *in_port = get_ofp_port(xbridge,
6140 ctx.base_flow.in_port.ofp_port);
6141
1d361a81
BP
6142 /* Tunnel stats only for not-thawed packets. */
6143 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
b256dc52
JS
6144 if (ctx.xin->resubmit_stats) {
6145 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
6146 if (in_port->bfd) {
6147 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
6148 }
6149 }
6150 if (ctx.xin->xcache) {
6151 struct xc_entry *entry;
6152
6153 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
901a517e
JR
6154 entry->dev.rx = netdev_ref(in_port->netdev);
6155 entry->dev.bfd = bfd_ref(in_port->bfd);
d6fc5f57
EJ
6156 }
6157 }
6158
1d361a81 6159 if (!xin->frozen_state && process_special(&ctx, in_port)) {
bef1403e
BP
6160 /* process_special() did all the processing for this packet.
6161 *
1d361a81
BP
6162 * We do not perform special processing on thawed packets, since that
6163 * was done before they were frozen and should not be redone. */
bef1403e
BP
6164 } else if (in_port && in_port->xbundle
6165 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
2d9b49dd
BP
6166 xlate_report_error(&ctx, "dropping packet received on port "
6167 "%s, which is reserved exclusively for mirroring",
6168 in_port->xbundle->name);
bef1403e 6169 } else {
1d361a81 6170 /* Sampling is done on initial reception; don't redo after thawing. */
a6092018 6171 unsigned int user_cookie_offset = 0;
1d361a81 6172 if (!xin->frozen_state) {
a6092018
BP
6173 user_cookie_offset = compose_sflow_action(&ctx);
6174 compose_ipfix_action(&ctx, ODPP_NONE);
e672ff9b 6175 }
0731abc5 6176 size_t sample_actions_len = ctx.odp_actions->size;
9583bc14 6177
234c3da9
BP
6178 if (tnl_process_ecn(flow)
6179 && (!in_port || may_receive(in_port, &ctx))) {
1806291d
BP
6180 const struct ofpact *ofpacts;
6181 size_t ofpacts_len;
6182
6183 if (xin->ofpacts) {
6184 ofpacts = xin->ofpacts;
6185 ofpacts_len = xin->ofpacts_len;
6186 } else if (ctx.rule) {
6187 const struct rule_actions *actions
07a3cd5c 6188 = rule_get_actions(&ctx.rule->up);
1806291d
BP
6189 ofpacts = actions->ofpacts;
6190 ofpacts_len = actions->ofpacts_len;
07a3cd5c 6191 ctx.rule_cookie = ctx.rule->up.flow_cookie;
1806291d
BP
6192 } else {
6193 OVS_NOT_REACHED();
6194 }
6195
7efbc3b7 6196 mirror_ingress_packet(&ctx);
9583bc14 6197 do_xlate_actions(ofpacts, ofpacts_len, &ctx);
fff1b9c0
JR
6198 if (ctx.error) {
6199 goto exit;
6200 }
9583bc14
EJ
6201
6202 /* We've let OFPP_NORMAL and the learning action look at the
1d361a81 6203 * packet, so cancel all actions and freezing if forwarding is
8a5fb3b4 6204 * disabled. */
9efd308e
DV
6205 if (in_port && (!xport_stp_forward_state(in_port) ||
6206 !xport_rstp_forward_state(in_port))) {
1520ef4f 6207 ctx.odp_actions->size = sample_actions_len;
1d361a81 6208 ctx_cancel_freeze(&ctx);
8a5fb3b4
BP
6209 ofpbuf_clear(&ctx.action_set);
6210 }
6211
1d361a81 6212 if (!ctx.freezing) {
8a5fb3b4 6213 xlate_action_set(&ctx);
e672ff9b 6214 }
1d361a81 6215 if (ctx.freezing) {
77ab5fd2 6216 finish_freezing(&ctx);
9583bc14
EJ
6217 }
6218 }
6219
e672ff9b 6220 /* Output only fully processed packets. */
1d361a81 6221 if (!ctx.freezing
e672ff9b 6222 && xbridge->has_in_band
ce4a6b76
BP
6223 && in_band_must_output_to_local_port(flow)
6224 && !actions_output_to_local_port(&ctx)) {
e93ef1c7 6225 compose_output_action(&ctx, OFPP_LOCAL, NULL);
9583bc14 6226 }
aaa0fbae 6227
a6092018
BP
6228 if (user_cookie_offset) {
6229 fix_sflow_action(&ctx, user_cookie_offset);
e672ff9b 6230 }
9583bc14
EJ
6231 }
6232
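    /* A single Netlink attribute can carry at most about 64 kB of payload
     * (its length field is 16 bits), which bounds how large one set of
     * datapath actions may be. */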
1520ef4f 6233 if (nl_attr_oversized(ctx.odp_actions->size)) {
542024c4 6234 /* These datapath actions are too big for a Netlink attribute, so we
0f032e95
BP
6235 * can't hand them to the kernel directly. dpif_execute() can execute
6236 * them one by one with help, so just mark the result as SLOW_ACTION to
6237 * prevent the flow from being installed. */
6238 COVERAGE_INC(xlate_actions_oversize);
6239 ctx.xout->slow |= SLOW_ACTION;
1520ef4f 6240 } else if (too_many_output_actions(ctx.odp_actions)) {
7d031d7e
BP
6241 COVERAGE_INC(xlate_actions_too_many_output);
6242 ctx.xout->slow |= SLOW_ACTION;
542024c4
BP
6243 }
6244
1d361a81
BP
6245 /* Do netflow only for packets on initial reception that are not sent to
6246 * the controller. We consider packets sent to the controller to be part
6247 * of the control plane rather than the data plane. */
6248 if (!xin->frozen_state
6249 && xbridge->netflow
6250 && !(xout->slow & SLOW_CONTROLLER)) {
1806291d
BP
6251 if (ctx.xin->resubmit_stats) {
6252 netflow_flow_update(xbridge->netflow, flow,
2031ef97 6253 ctx.nf_output_iface,
1806291d
BP
6254 ctx.xin->resubmit_stats);
6255 }
6256 if (ctx.xin->xcache) {
6257 struct xc_entry *entry;
b256dc52 6258
1806291d 6259 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
901a517e
JR
6260 entry->nf.netflow = netflow_ref(xbridge->netflow);
6261 entry->nf.flow = xmemdup(flow, sizeof *flow);
6262 entry->nf.iface = ctx.nf_output_iface;
d6fc5f57
EJ
6263 }
6264 }
6265
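    /* In the udpif format, tunnel option masks are stored as raw bytes in the
     * same layout as the packet's options rather than indexed through a
     * metadata table, so masks collected during translation must be
     * re-encoded before being handed back to the caller. */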
8d8ab6c2
JG
6266 /* Translate tunnel metadata masks to udpif format if necessary. */
6267 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
6268 if (ctx.wc->masks.tunnel.metadata.present.map) {
6269 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
6270 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
6271 sizeof(struct geneve_opt)];
6272
6273 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
6274 &ctx.wc->masks.tunnel,
6275 upcall_tnl->metadata.opts.gnv,
6276 upcall_tnl->metadata.present.len,
6277 opts);
6278 memset(&ctx.wc->masks.tunnel.metadata, 0,
6279 sizeof ctx.wc->masks.tunnel.metadata);
6280 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
6281 upcall_tnl->metadata.present.len);
6282 }
6283 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
6284 ctx.wc->masks.tunnel.metadata.tab = NULL;
6285 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
6286 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
6287 /* If we didn't have options in UDPIF format and didn't have an existing
6288 * metadata table, then it means that there were no options at all when
6289 * we started processing and any wildcards we picked up were from
6290 * action generation. Without options on the incoming packet, wildcards
6291 * aren't meaningful. To avoid them possibly getting misinterpreted,
6292 * just clear everything. */
6293 if (ctx.wc->masks.tunnel.metadata.present.map) {
6294 memset(&ctx.wc->masks.tunnel.metadata, 0,
6295 sizeof ctx.wc->masks.tunnel.metadata);
6296 } else {
6297 ctx.wc->masks.tunnel.metadata.tab = NULL;
6298 }
6299 }
6300
c0e638aa 6301 xlate_wc_finish(&ctx);
1520ef4f
BP
6302
6303exit:
8d8ab6c2
JG
6304 /* Reset the table to what it was when we came in. If we only fetched
6305 * it locally, then it has no meaning outside of flow translation. */
6306 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
6307
1520ef4f
BP
6308 ofpbuf_uninit(&ctx.stack);
6309 ofpbuf_uninit(&ctx.action_set);
1d361a81 6310 ofpbuf_uninit(&ctx.frozen_actions);
1520ef4f 6311 ofpbuf_uninit(&scratch_actions);
fff1b9c0
JR
6312
6313 /* Make sure we return a "drop flow" in case of an error. */
6314 if (ctx.error) {
6315 xout->slow = 0;
6316 if (xin->odp_actions) {
6317 ofpbuf_clear(xin->odp_actions);
6318 }
6319 }
6320 return ctx.error;
91d6cd12
AW
6321}
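
/* A minimal sketch of a typical caller of xlate_actions(): initialize a
 * 'struct xlate_in', translate, then release the 'struct xlate_out'.  The
 * function name here is hypothetical and the ofproto, flow, and action buffer
 * are assumed to be supplied by the caller; it mirrors the pattern used by
 * xlate_resume() below. */
static enum xlate_error
translate_flow_sketch(struct ofproto_dpif *ofproto, const struct flow *flow,
                      struct ofpbuf *odp_actions)
{
    struct xlate_in xin;
    struct xlate_out xout;

    /* No rule, packet, or wildcards; tcp_flags of 0. */
    xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
                  flow, flow->in_port.ofp_port, NULL, 0, NULL, NULL,
                  odp_actions);

    enum xlate_error error = xlate_actions(&xin, &xout);
    xlate_out_uninit(&xout);
    return error;
}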
6322
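/* Resumes a translation that was paused by an NXAST_PAUSE action, using the
 * frozen pipeline state carried in 'pin' (as handed back by the controller in
 * an NXT_RESUME message).  On return, 'odp_actions' holds the resulting
 * datapath actions and '*slow' any slow-path reason. */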
77ab5fd2
BP
6323enum ofperr
6324xlate_resume(struct ofproto_dpif *ofproto,
6325 const struct ofputil_packet_in_private *pin,
6326 struct ofpbuf *odp_actions,
6327 enum slow_path_reason *slow)
6328{
6329 struct dp_packet packet;
6330 dp_packet_use_const(&packet, pin->public.packet,
6331 pin->public.packet_len);
6332
6333 struct flow flow;
6334 flow_extract(&packet, &flow);
6335
6336 struct xlate_in xin;
1f4a8933
JR
6337 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
6338 &flow, 0, NULL, ntohs(flow.tcp_flags),
77ab5fd2
BP
6339 &packet, NULL, odp_actions);
6340
6341 struct ofpact_note noop;
6342 ofpact_init_NOTE(&noop);
6343 noop.length = 0;
6344
6345 bool any_actions = pin->actions_len > 0;
6346 struct frozen_state state = {
6347 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
6348 .ofproto_uuid = pin->bridge,
6349 .stack = pin->stack,
84cf3c1f 6350 .stack_size = pin->stack_size,
77ab5fd2
BP
6351 .mirrors = pin->mirrors,
6352 .conntracked = pin->conntracked,
6353
6354 /* When there are no actions, xlate_actions() will search the flow
6355 * table. We don't want it to do that (we want it to resume), so
6356 * supply a no-op action if there aren't any.
6357 *
6358 * (We can't necessarily avoid translating actions entirely if there
6359 * aren't any actions, because there might be some finishing-up to do
6360 * at the end of the pipeline, and we don't check for those
6361 * conditions.) */
6362 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
6363 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
6364
6365 .action_set = pin->action_set,
6366 .action_set_len = pin->action_set_len,
6367 };
6368 frozen_metadata_from_flow(&state.metadata,
6369 &pin->public.flow_metadata.flow);
6370 xin.frozen_state = &state;
6371
6372 struct xlate_out xout;
6373 enum xlate_error error = xlate_actions(&xin, &xout);
6374 *slow = xout.slow;
6375 xlate_out_uninit(&xout);
6376
6377 /* xlate_actions() can generate a number of errors, but only
6378 * XLATE_BRIDGE_NOT_FOUND stands out as one that should definitely be
6379 * reported over OpenFlow.  The others could come up in packet-outs or
6380 * regular flow translation, and reporting them to the controller is
6381 * unlikely to be useful. */
6382 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
6383}
6384
2eb79142
JG
6385/* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
6386 * supports a notion of an OAM flag, sets it if 'oam' is true.
91d6cd12
AW
6387 * May modify 'packet'.
6388 * Returns 0 if successful, otherwise a positive errno value. */
6389int
2eb79142
JG
6390xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
6391 struct dp_packet *packet)
91d6cd12 6392{
84f0f298 6393 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
91d6cd12 6394 struct xport *xport;
2eb79142
JG
6395 uint64_t ofpacts_stub[1024 / 8];
6396 struct ofpbuf ofpacts;
91d6cd12 6397 struct flow flow;
91d6cd12 6398
2eb79142 6399 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
91d6cd12 6400 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
cf62fa4c 6401 flow_extract(packet, &flow);
b5e7e61a 6402 flow.in_port.ofp_port = OFPP_NONE;
91d6cd12 6403
84f0f298 6404 xport = xport_lookup(xcfg, ofport);
91d6cd12 6405 if (!xport) {
02ea2703 6406 return EINVAL;
91d6cd12 6407 }
2eb79142
JG
6408
6409 if (oam) {
128684a6
JR
6410 const ovs_be16 oam = htons(NX_TUN_FLAG_OAM);
6411 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS), &oam, &oam);
2eb79142
JG
6412 }
6413
6414 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
e491a67a 6415
1f4a8933
JR
6416 /* The actions here do not refer to anything versionable (flow tables or
6417 * groups), so we don't need to worry about the version here. */
6418 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
6419 OVS_VERSION_MAX, &flow, NULL,
2eb79142 6420 ofpacts.data, ofpacts.size, packet);
9583bc14 6421}
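
/* A minimal sketch of one way to use xlate_send_packet().  The helper name is
 * hypothetical; it assumes the caller supplies a port and a raw frame, and it
 * copies the frame into a fresh dp_packet because xlate_send_packet() may
 * modify the packet it is given. */
static int
send_frame_sketch(const struct ofport_dpif *port, const void *frame,
                  size_t len)
{
    struct dp_packet *packet = dp_packet_new(len);
    dp_packet_put(packet, frame, len);

    int error = xlate_send_packet(port, false, packet);
    dp_packet_delete(packet);
    return error;
}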
b256dc52 6422
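/* Updates MAC learning for the bridge associated with 'ofproto': learns
 * 'dl_src' as seen on 'in_port' and 'vlan', with 'is_grat_arp' indicating a
 * gratuitous ARP.  Does nothing if the bridge or the input bundle cannot be
 * found. */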
901a517e 6423void
064799a1
JR
6424xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
6425 ofp_port_t in_port, struct eth_addr dl_src,
6426 int vlan, bool is_grat_arp)
b256dc52 6427{
84f0f298 6428 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
b256dc52
JS
6429 struct xbridge *xbridge;
6430 struct xbundle *xbundle;
b256dc52 6431
84f0f298 6432 xbridge = xbridge_lookup(xcfg, ofproto);
b256dc52
JS
6433 if (!xbridge) {
6434 return;
6435 }
6436
2d9b49dd 6437 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
b256dc52
JS
6438 if (!xbundle) {
6439 return;
6440 }
6441
2d9b49dd 6442 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
b256dc52 6443}
bef503e8
AZ
6444
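/* Marks the bridge associated with 'ofproto' as not supporting the datapath
 * clone action, so that subsequent translations avoid generating it.  Does
 * nothing if the bridge cannot be found. */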
6445void
6446xlate_disable_dp_clone(const struct ofproto_dpif *ofproto)
6447{
6448 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
6449 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
6450
6451 if (xbridge) {
6452 xbridge->support.clone = false;
6453 }
6454}