/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-ed-props.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"
#include "uuid.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation.
 *
 * The goal of limiting the depth of resubmits is to ensure that flow
 * translation eventually terminates.  Only resubmits to the same table or an
 * earlier table count against the maximum depth.  This is because resubmits to
 * strictly monotonically increasing table IDs will eventually terminate, since
 * any OpenFlow switch has a finite number of tables.  OpenFlow tables are most
 * commonly traversed in numerically increasing order, so this limit has little
 * effect on conventionally designed OpenFlow pipelines.
 *
 * Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
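
/* To make the two limits above concrete, here is a minimal sketch (not part
 * of the original file) of how a resubmit path might check them.  The helper
 * name 'example_resubmit_guard' is hypothetical; 'depth', 'resubmits', and
 * 'error' are fields of struct xlate_ctx defined below, and the error codes
 * are from enum xlate_error:
 *
 *     static bool
 *     example_resubmit_guard(struct xlate_ctx *ctx)
 *     {
 *         if (ctx->depth >= MAX_DEPTH) {
 *             ctx->error = XLATE_RECURSION_TOO_DEEP;  // 64 nested resubmits.
 *             return false;
 *         }
 *         if (ctx->resubmits >= MAX_RESUBMITS) {
 *             ctx->error = XLATE_TOO_MANY_RESUBMITS;  // 4096 in total.
 *             return false;
 *         }
 *         return true;
 *     }
 */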

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    uint16_t qinq_ethtype;         /* Ethertype of dot1q-tunnel interface,
                                    * either 0x8100 or 0x88a8. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    unsigned long *cvlans;         /* Bitmap of allowed customer VLANs,
                                    * NULL if all VLANs are allowed. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
    bool protected;                /* Protected port mode. */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    struct hmap_node uuid_node;      /* Node in global 'xports_uuid' map. */
    struct uuid uuid;                /* Key in global 'xports_uuid' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */
    enum netdev_pt_mode pt_mode;     /* packet_type handling. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct xlate_cfg *xcfg;
    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    struct in6_addr orig_tunnel_ipv6_dst;

    /* Stack for the push and pop actions.  See comment above nx_stack_push()
     * in nx-match.c for info on how the stack is stored. */
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Flow translation populates this with wildcards relevant in translation.
     * When 'xin->wc' is nonnull, this is the same pointer.  When 'xin->wc' is
     * null, this is a pointer to a temporary buffer. */
    struct flow_wildcards *wc;

    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
     * this is the same pointer.  When 'xin->odp_actions' is null, this points
     * to a scratch ofpbuf.  This allows code to add actions to
     * 'ctx->odp_actions' without worrying about whether the caller really
     * wants actions. */
    struct ofpbuf *odp_actions;

    /* Statistics maintained by xlate_table_action().
     *
     * These statistics limit the amount of work that a single flow
     * translation can perform.  The goal of the first of these, 'depth', is
     * primarily to prevent translation from performing an infinite amount of
     * work.  It counts the current depth of nested "resubmit"s (and a few
     * other activities); when a resubmit returns, it decreases.  Resubmits to
     * tables in strictly monotonically increasing order don't contribute to
     * 'depth' because they cannot cause a flow translation to take an infinite
     * amount of time (because the number of tables is finite).  Translation
     * aborts when 'depth' exceeds MAX_DEPTH.
     *
     * 'resubmits', on the other hand, prevents flow translation from
     * performing an extraordinarily large (though still finite) amount of
     * work.  It counts the total number of resubmits (and a few other
     * activities) that have been executed.  Returning from a resubmit does
     * not affect this counter.  Thus, this limits the amount of work that a
     * particular translation can perform.  Translation aborts when
     * 'resubmits' exceeds MAX_RESUBMITS (which is much larger than
     * MAX_DEPTH).
     */
    int depth;                  /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */
    bool in_packet_out;         /* Currently translating a packet_out msg, if
                                 * true. */
    bool pending_encap;         /* True when waiting to commit a pending
                                 * encap action. */
    struct ofpbuf *encap_data;  /* May contain a pointer to an ofpbuf with
                                 * context for the datapath encap action. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
    bool exit;                  /* No further actions should be processed. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
    int mirror_snaplen;         /* Max size of a mirror packet in bytes. */

    /* Freezing Translation
     * ====================
     *
     * At some point during translation, the code may recognize the need to
     * halt and checkpoint the translation in a way that it can be restarted
     * again later.  We call the checkpointing process "freezing" and the
     * restarting process "thawing".
     *
     * The use cases for freezing are:
     *
     *     - "Recirculation", where the translation process discovers that it
     *       doesn't have enough information to complete translation without
     *       actually executing the actions that have already been translated,
     *       which provides the additionally needed information.  In these
     *       situations, the translation process freezes and assigns the
     *       frozen data a unique "recirculation ID", which it associates with
     *       the data in a table in userspace (see ofproto-dpif-rid.h).  It
     *       also adds an OVS_ACTION_ATTR_RECIRC action specifying that ID to
     *       the datapath actions.  When a packet hits that action, the
     *       datapath looks its flow up again using the ID.  If there's a
     *       miss, it comes back to userspace, which finds the recirculation
     *       table entry for the ID, thaws the associated frozen data, and
     *       continues translation from that point given the additional
     *       information that is now known.
     *
     *       The archetypal example is MPLS.  As MPLS is implemented in
     *       OpenFlow, the protocol that follows the last MPLS label becomes
     *       known only when that label is popped by an OpenFlow action.  That
     *       means that Open vSwitch can't extract the headers beyond the MPLS
     *       labels until the pop action is executed.  Thus, at that point
     *       translation uses the recirculation process to extract the headers
     *       beyond the MPLS labels.
     *
     *       (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
     *       output to bonds.  OVS pre-populates all the datapath flows for
     *       bond output in the datapath, though, which means that the
     *       elaborate process of coming back to userspace for a second round
     *       of translation isn't needed, and so bonds don't follow the above
     *       process.)
     *
     *     - "Continuation".  A continuation is a way for an OpenFlow
     *       controller to interpose on a packet's traversal of the OpenFlow
     *       tables.  When the translation process encounters a "controller"
     *       action with the "pause" flag, it freezes translation, serializes
     *       the frozen data, and sends it to an OpenFlow controller.  The
     *       controller then examines and possibly modifies the frozen data
     *       and eventually sends it back to the switch, which thaws it and
     *       continues translation.
     *
     * The main problem of freezing translation is preserving state, so that
     * when the translation is thawed later it resumes from where it left off,
     * without disruption.  In particular, actions must be preserved as
     * follows:
     *
     *     - If we're freezing because an action needed more information, the
     *       action that prompted it.
     *
     *     - Any actions remaining to be translated within the current flow.
     *
     *     - If translation was frozen within a NXAST_RESUBMIT, then any
     *       actions following the resubmit action.  Resubmit actions can be
     *       nested, so this has to go all the way up the control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
     *
     *     - The table ID and cookie of the flow being translated at each
     *       level of the control stack, because these can become visible
     *       through OFPAT_CONTROLLER actions (and other ways).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need to freeze translation is identified, the
     * translation process:
     *
     * 1. Sets 'freezing' to true.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
     *    frozen_actions.header to the action to make it easy to find it
     *    later.  This action holds the current table ID and cookie so that
     *    they can be restored during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions
     *    following it within the same flow to 'frozen_actions', so that they
     *    can be executed during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these
     *    notices that we're exiting and freezing and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
     *    followed by any actions that were yet unprocessed.
     *
     * If we're freezing because of recirculation, the caller generates a
     * recirculation ID and associates all the state produced by this process
     * with it.  For post-recirculation upcall translation, the caller passes
     * it back in for the new translation to execute.  The process yielded a
     * set of ofpacts that can be translated directly, so it is not much of a
     * special case at that point.
     */
    bool freezing;
    bool recirc_update_dp_hash;    /* Generated recirculation will be preceded
                                    * by datapath HASH action to get an updated
                                    * dp_hash after recirculation. */
    uint32_t dp_hash_alg;
    uint32_t dp_hash_basis;
    struct ofpbuf frozen_actions;
    const struct ofpact_controller *pause;

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* True if conntrack has been performed on this packet during processing
     * on the current bridge.  This is used to determine whether conntrack
     * state from the datapath should be honored after thawing. */
    bool conntracked;

    /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
    struct ofpact_nat *ct_nat_action;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */

    enum xlate_error error;     /* Translation failed. */
};
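
/* For illustration (not part of the original file): steps 1 and 2 of the
 * freeze sequence above are exactly what ctx_trigger_freeze() below does.
 * After a freeze for MPLS recirculation, the flow's datapath actions end in
 * a recirc action, along the lines of:
 *
 *     pop_mpls(eth_type=0x800),recirc(0x1f)
 *
 * where 0x1f is a made-up recirculation ID under which the frozen actions
 * and metadata are stored in the userspace recirculation table. */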

/* Structure to track VLAN manipulation. */
struct xvlan_single {
    uint16_t tpid;
    uint16_t vid;
    uint16_t pcp;
};

struct xvlan {
    struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
};

const char *xlate_strerror(enum xlate_error error)
{
    switch (error) {
    case XLATE_OK:
        return "OK";
    case XLATE_BRIDGE_NOT_FOUND:
        return "Bridge not found";
    case XLATE_RECURSION_TOO_DEEP:
        return "Recursion too deep";
    case XLATE_TOO_MANY_RESUBMITS:
        return "Too many resubmits";
    case XLATE_STACK_TOO_DEEP:
        return "Stack too deep";
    case XLATE_NO_RECIRCULATION_CONTEXT:
        return "No recirculation context";
    case XLATE_RECIRCULATION_CONFLICT:
        return "Recirculation conflict";
    case XLATE_TOO_MANY_MPLS_LABELS:
        return "Too many MPLS labels";
    case XLATE_INVALID_TUNNEL_METADATA:
        return "Invalid tunnel metadata";
    case XLATE_UNSUPPORTED_PACKET_TYPE:
        return "Unsupported packet type";
    }
    return "Unknown error";
}

static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);

static void
patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
                  struct xport *out_dev);

static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->freezing = true;
}

static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
                                  uint32_t basis)
{
    ctx->exit = true;
    ctx->freezing = true;
    ctx->recirc_update_dp_hash = true;
    ctx->dp_hash_alg = type;
    ctx->dp_hash_basis = basis;
}

static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
    return !ctx->frozen_actions.size;
}

static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
    if (ctx->freezing) {
        ctx->freezing = false;
        ctx->recirc_update_dp_hash = false;
        ofpbuf_clear(&ctx->frozen_actions);
        ctx->frozen_actions.header = NULL;
    }
}

static void finish_freezing(struct xlate_ctx *ctx);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does
 * not have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
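
/* A minimal sketch (not part of the original file) of how this map is
 * searched; the real lookup lives in get_skb_priority(), declared below.
 * This illustrative copy assumes the same hashing scheme used when entries
 * are inserted, hash_int(skb_priority, 0):
 *
 *     struct skb_priority_to_dscp *pdscp;
 *     HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node,
 *                              hash_int(skb_priority, 0),
 *                              &xport->skb_priorities) {
 *         if (pdscp->skb_priority == skb_priority) {
 *             return pdscp;  // DSCP bits for this queue in pdscp->dscp.
 *         }
 *     }
 *     return NULL;
 */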

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
    struct hmap xports_uuid;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;
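
/* A minimal sketch (not part of the original file) of the reader side of
 * this scheme, as used throughout this file: handler and revalidator threads
 * take the current configuration with ovsrcu_get() and may use it without
 * locking until they quiesce:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * The writer side is shown in xlate_txn_start() and xlate_txn_commit()
 * below. */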

typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
                                   struct xlate_ctx *, bool);
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *, bool);
static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                                struct xlate_ctx *, bool);
static void xlate_normal(struct xlate_ctx *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss, bool with_ct_orig,
                               bool is_last_action, xlate_actions_handler *);

static bool input_vid_is_valid(const struct xlate_ctx *,
                               uint16_t vid, struct xbundle *);
static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
static void xvlan_pop(struct xvlan *src);
static void xvlan_push_uninit(struct xvlan *src);
static void xvlan_extract(const struct flow *, struct xvlan *);
static void xvlan_put(struct flow *, const struct xvlan *);
static void xvlan_input_translate(const struct xbundle *,
                                  const struct xvlan *in,
                                  struct xvlan *xvlan);
static void xvlan_output_translate(const struct xbundle *,
                                   const struct xvlan *xvlan,
                                   struct xvlan *out);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          const struct xvlan *);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t hash_alg;    /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr,
                                  bool is_last_action, bool truncate);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
                                              const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
                                          const struct uuid *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode,
                              uint16_t qinq_ethtype, int vlan,
                              unsigned long *trunks, unsigned long *cvlans,
                              bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

/* Tracing helpers. */

/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
 * the list of nodes maintained in ctx->xin.  The new node has type 'type' and
 * its text is created from 'format' by treating it as a printf format string.
 * Returns the list of nodes embedded within the new trace node; ordinarily,
 * the caller can ignore this, but it is useful if the caller needs to nest
 * more trace nodes within the new node.
 *
 * If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
             const char *format, ...)
{
    struct ovs_list *subtrace = NULL;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        va_list args;
        va_start(args, format);
        char *text = xvasprintf(format, args);
        subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
        va_end(args);
        free(text);
    }
    return subtrace;
}
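
/* A minimal usage sketch (not part of the original file) of the returned
 * subtrace list; saving and restoring 'ctx->xin->trace' follows the pattern
 * that xlate_report_table() below expects of its callers.  The variable
 * names here are hypothetical:
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     struct ovs_list *subs = xlate_report(ctx, OFT_TABLE,
 *                                          "resubmit to table %"PRIu8, id);
 *     if (subs) {
 *         ctx->xin->trace = subs;  // Nested reports attach under this node.
 *     }
 *     ...trace the resubmitted table's activities...
 *     ctx->xin->trace = old_trace;
 */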

/* This is like xlate_report() for errors that are serious enough that we
 * should log them even if we are not tracing. */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
    } else {
        ds_put_cstr(&s, " while processing ");
        flow_format(&s, &ctx->base_flow, NULL);
        ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
        VLOG_WARN("%s", ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* This is like xlate_report() for messages that should be logged at debug
 * level (even if we are not tracing) because they can be valuable for
 * debugging. */
static void OVS_PRINTF_FORMAT(3, 4)
xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                   const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
    } else {
        VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
 * trace, whose text is 'title' followed by a formatted version of the
 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                     const char *title,
                     const struct ofpact *ofpacts, size_t ofpacts_len)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        ds_put_format(&s, "%s: ", title);
        ofpacts_format(ofpacts, ofpacts_len, NULL, &s);
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
        ds_destroy(&s);
    }
}

/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
 * trace, whose message is a formatted version of the OpenFlow action set.
 * 'verb' should be "was" or "is", depending on whether the action set reported
 * is the new action set or the old one.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ofpbuf action_list;
        ofpbuf_init(&action_list, 0);
        ofpacts_execute_action_set(&action_list, &ctx->action_set);
        if (action_list.size) {
            struct ds s = DS_EMPTY_INITIALIZER;
            ofpacts_format(action_list.data, action_list.size, NULL, &s);
            xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
                         verb, ds_cstr(&s));
            ds_destroy(&s);
        } else {
            xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
        }
        ofpbuf_uninit(&action_list);
    }
}

/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
 * OpenFlow table 'table_id') to the trace and makes this node the parent for
 * future trace nodes.  The caller should save ctx->xin->trace before calling
 * this function, then after tracing all of the activities under the table,
 * restore its previous value.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
                   uint8_t table_id)
{
    if (OVS_LIKELY(!ctx->xin->trace)) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    ds_put_format(&s, "%2d. ", table_id);
    if (rule == ctx->xin->ofproto->miss_rule) {
        ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
    } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
        ds_put_cstr(&s, "No match.");
    } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
        ds_put_cstr(&s, "Packets are IP fragments and "
                    "the fragment handling mode is \"drop\".");
    } else {
        minimatch_format(&rule->up.cr.match,
                         ofproto_get_tun_tab(&ctx->xin->ofproto->up),
                         NULL, &s, OFP_DEFAULT_PRIORITY);
        if (ds_last(&s) != ' ') {
            ds_put_cstr(&s, ", ");
        }
        ds_put_format(&s, "priority %d", rule->up.cr.priority);
        if (rule->up.flow_cookie) {
            ds_put_format(&s, ", cookie %#"PRIx64,
                          ntohll(rule->up.flow_cookie));
        }
    }
    ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
                                      ds_cstr(&s))->subs;
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
 * reporting the value of subfield 'sf'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_subfield(const struct xlate_ctx *ctx,
                      const struct mf_subfield *sf)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        mf_format_subfield(sf, &s);
        ds_put_cstr(&s, " is now ");

        if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
            union mf_value value;
            mf_get_value(sf->field, &ctx->xin->flow, &value);
            mf_format(sf->field, &value, NULL, NULL, &s);
        } else {
            union mf_subvalue cst;
            mf_read_subfield(sf, &ctx->xin->flow, &cst);
            ds_put_hex(&s, &cst, sizeof cst);
        }

        xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));

        ds_destroy(&s);
    }
}

static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    ovs_list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    ovs_list_init(&xbundle->xports);
    ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
    hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
                uuid_hash(&xport->uuid));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->support = *support;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
                  int vlan, unsigned long *trunks, unsigned long *cvlans,
                  bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable, bool protected)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->qinq_ethtype = qinq_ethtype;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->cvlans = cvlans;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;
    xbundle->protected = protected;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
                const struct rstp_port* rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->pt_mode = netdev_get_pt_mode(netdev);
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu, xbridge->has_in_band,
                      &xbridge->support);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
                      xbundle->vlan, xbundle->trunks, xbundle->cvlans,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable, xbundle->protected);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    new_xport->uuid = xport->uuid;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);
    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);
    hmap_init(&new_xcfg->xports_uuid);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}
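
/* A concrete version of the sample workflow above (not part of the original
 * file); the bridge and bundle arguments shown are placeholders for values
 * the caller already holds:
 *
 *     xlate_txn_start();
 *     xlate_ofproto_set(ofproto, "br0", dpif, ml, stp, rstp, ms, mbridge,
 *                       sflow, ipfix, netflow, forward_bpdu, has_in_band,
 *                       &support);
 *     xlate_bundle_remove(ofbundle);
 *     xlate_txn_commit();  // Publishes new_xcfg; RCU-frees the old config.
 */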
1105
1106
1107static void
1108xlate_xcfg_free(struct xlate_cfg *xcfg)
1109{
1110 struct xbridge *xbridge, *next_xbridge;
1111
1112 if (!xcfg) {
1113 return;
1114 }
1115
1116 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1117 xlate_xbridge_remove(xcfg, xbridge);
1118 }
1119
1120 hmap_destroy(&xcfg->xbridges);
1121 hmap_destroy(&xcfg->xbundles);
1122 hmap_destroy(&xcfg->xports);
ab108418 1123 hmap_destroy(&xcfg->xports_uuid);
84f0f298
RW
1124 free(xcfg);
1125}
1126
1127void
1128xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
34dd0d78 1129 struct dpif *dpif,
84f0f298 1130 const struct mac_learning *ml, struct stp *stp,
9efd308e 1131 struct rstp *rstp, const struct mcast_snooping *ms,
84f0f298
RW
1132 const struct mbridge *mbridge,
1133 const struct dpif_sflow *sflow,
1134 const struct dpif_ipfix *ipfix,
2f47cdf4 1135 const struct netflow *netflow,
b440dd8c
JS
1136 bool forward_bpdu, bool has_in_band,
1137 const struct dpif_backer_support *support)
84f0f298
RW
1138{
1139 struct xbridge *xbridge;
1140
1141 ovs_assert(new_xcfg);
1142
1143 xbridge = xbridge_lookup(new_xcfg, ofproto);
1144 if (!xbridge) {
1145 xbridge = xzalloc(sizeof *xbridge);
1146 xbridge->ofproto = ofproto;
1147
1148 xlate_xbridge_init(new_xcfg, xbridge);
1149 }
1150
1151 free(xbridge->name);
1152 xbridge->name = xstrdup(name);
1153
34dd0d78 1154 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
b440dd8c 1155 netflow, forward_bpdu, has_in_band, support);
84f0f298
RW
1156}
1157
1158static void
1159xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
46c88433 1160{
46c88433
EJ
1161 struct xbundle *xbundle, *next_xbundle;
1162 struct xport *xport, *next_xport;
1163
1164 if (!xbridge) {
1165 return;
1166 }
1167
1168 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
84f0f298 1169 xlate_xport_remove(xcfg, xport);
46c88433
EJ
1170 }
1171
1172 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
84f0f298 1173 xlate_xbundle_remove(xcfg, xbundle);
46c88433
EJ
1174 }
1175
84f0f298 1176 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
795cc5c1 1177 mac_learning_unref(xbridge->ml);
6d95c4e8 1178 mcast_snooping_unref(xbridge->ms);
795cc5c1
EJ
1179 mbridge_unref(xbridge->mbridge);
1180 dpif_sflow_unref(xbridge->sflow);
1181 dpif_ipfix_unref(xbridge->ipfix);
3570f7e4 1182 netflow_unref(xbridge->netflow);
795cc5c1 1183 stp_unref(xbridge->stp);
9efd308e 1184 rstp_unref(xbridge->rstp);
795cc5c1 1185 hmap_destroy(&xbridge->xports);
46c88433
EJ
1186 free(xbridge->name);
1187 free(xbridge);
1188}
1189
84f0f298
RW
1190void
1191xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1192{
1193 struct xbridge *xbridge;
1194
1195 ovs_assert(new_xcfg);
1196
1197 xbridge = xbridge_lookup(new_xcfg, ofproto);
1198 xlate_xbridge_remove(new_xcfg, xbridge);
1199}
1200
46c88433
EJ
1201void
1202xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
f0fb825a 1203 const char *name, enum port_vlan_mode vlan_mode,
fed8962a
EG
1204 uint16_t qinq_ethtype, int vlan,
1205 unsigned long *trunks, unsigned long *cvlans,
f0fb825a 1206 bool use_priority_tags,
46c88433 1207 const struct bond *bond, const struct lacp *lacp,
c005f976 1208 bool floodable, bool protected)
46c88433 1209{
84f0f298 1210 struct xbundle *xbundle;
46c88433 1211
84f0f298
RW
1212 ovs_assert(new_xcfg);
1213
1214 xbundle = xbundle_lookup(new_xcfg, ofbundle);
46c88433
EJ
1215 if (!xbundle) {
1216 xbundle = xzalloc(sizeof *xbundle);
1217 xbundle->ofbundle = ofbundle;
84f0f298 1218 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
46c88433 1219
84f0f298 1220 xlate_xbundle_init(new_xcfg, xbundle);
46c88433
EJ
1221 }
1222
46c88433
EJ
1223 free(xbundle->name);
1224 xbundle->name = xstrdup(name);
1225
fed8962a 1226 xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
c005f976 1227 use_priority_tags, bond, lacp, floodable, protected);
46c88433
EJ
1228}
1229
84f0f298
RW
1230static void
1231xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
46c88433 1232{
5f03c983 1233 struct xport *xport;
46c88433
EJ
1234
1235 if (!xbundle) {
1236 return;
1237 }
1238
5f03c983 1239 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
46c88433
EJ
1240 xport->xbundle = NULL;
1241 }
1242
84f0f298 1243 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
417e7e66 1244 ovs_list_remove(&xbundle->list_node);
46c88433
EJ
1245 bond_unref(xbundle->bond);
1246 lacp_unref(xbundle->lacp);
1247 free(xbundle->name);
1248 free(xbundle);
1249}
1250
84f0f298
RW
1251void
1252xlate_bundle_remove(struct ofbundle *ofbundle)
1253{
1254 struct xbundle *xbundle;
1255
1256 ovs_assert(new_xcfg);
1257
1258 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1259 xlate_xbundle_remove(new_xcfg, xbundle);
1260}
1261
46c88433
EJ
1262void
1263xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1264 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1265 odp_port_t odp_port, const struct netdev *netdev,
1266 const struct cfm *cfm, const struct bfd *bfd,
0477baa9
DF
1267 const struct lldp *lldp, struct ofport_dpif *peer,
1268 int stp_port_no, const struct rstp_port *rstp_port,
55954f6e 1269 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
dd8cd4b4
SH
1270 enum ofputil_port_config config,
1271 enum ofputil_port_state state, bool is_tunnel,
9d189a50 1272 bool may_enable)
46c88433 1273{
55954f6e 1274 size_t i;
84f0f298
RW
1275 struct xport *xport;
1276
1277 ovs_assert(new_xcfg);
46c88433 1278
84f0f298 1279 xport = xport_lookup(new_xcfg, ofport);
46c88433
EJ
1280 if (!xport) {
1281 xport = xzalloc(sizeof *xport);
1282 xport->ofport = ofport;
84f0f298 1283 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
46c88433 1284 xport->ofp_port = ofp_port;
ab108418 1285 uuid_generate(&xport->uuid);
46c88433 1286
84f0f298 1287 xlate_xport_init(new_xcfg, xport);
46c88433
EJ
1288 }
1289
1290 ovs_assert(xport->ofp_port == ofp_port);
1291
0477baa9
DF
1292 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1293 stp_port_no, rstp_port, config, state, is_tunnel,
1294 may_enable);
46c88433
EJ
1295
1296 if (xport->peer) {
1297 xport->peer->peer = NULL;
1298 }
84f0f298 1299 xport->peer = xport_lookup(new_xcfg, peer);
46c88433
EJ
1300 if (xport->peer) {
1301 xport->peer->peer = xport;
1302 }
1303
1304 if (xport->xbundle) {
417e7e66 1305 ovs_list_remove(&xport->bundle_node);
46c88433 1306 }
84f0f298 1307 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
46c88433 1308 if (xport->xbundle) {
417e7e66 1309 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
46c88433 1310 }
55954f6e
EJ
1311
1312 clear_skb_priorities(xport);
1313 for (i = 0; i < n_qdscp; i++) {
1314 struct skb_priority_to_dscp *pdscp;
1315 uint32_t skb_priority;
1316
89a8a7f0
EJ
1317 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1318 &skb_priority)) {
55954f6e
EJ
1319 continue;
1320 }
1321
1322 pdscp = xmalloc(sizeof *pdscp);
1323 pdscp->skb_priority = skb_priority;
1324 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1325 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1326 hash_int(pdscp->skb_priority, 0));
1327 }
46c88433
EJ
1328}
1329
84f0f298
RW
1330static void
1331xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
46c88433 1332{
46c88433
EJ
1333 if (!xport) {
1334 return;
1335 }
1336
1337 if (xport->peer) {
1338 xport->peer->peer = NULL;
1339 xport->peer = NULL;
1340 }
1341
e621a12d 1342 if (xport->xbundle) {
417e7e66 1343 ovs_list_remove(&xport->bundle_node);
e621a12d
EJ
1344 }
1345
55954f6e
EJ
1346 clear_skb_priorities(xport);
1347 hmap_destroy(&xport->skb_priorities);
1348
84f0f298 1349 hmap_remove(&xcfg->xports, &xport->hmap_node);
ab108418 1350 hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
46c88433
EJ
1351 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1352
1353 netdev_close(xport->netdev);
f025bcb7 1354 rstp_port_unref(xport->rstp_port);
46c88433
EJ
1355 cfm_unref(xport->cfm);
1356 bfd_unref(xport->bfd);
0477baa9 1357 lldp_unref(xport->lldp);
46c88433
EJ
1358 free(xport);
1359}
1360
84f0f298
RW
1361void
1362xlate_ofport_remove(struct ofport_dpif *ofport)
1363{
1364 struct xport *xport;
1365
1366 ovs_assert(new_xcfg);
1367
1368 xport = xport_lookup(new_xcfg, ofport);
1369 xlate_xport_remove(new_xcfg, xport);
1370}
1371
ef377a58
JR
1372static struct ofproto_dpif *
1373xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
1374 ofp_port_t *ofp_in_port, const struct xport **xportp)
1375{
e672ff9b 1376 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
ef377a58 1377 const struct xport *xport;
f9038ef6 1378
43e73536
ZB
1379 /* If packet is recirculated, xport can be retrieved from frozen state. */
1380 if (flow->recirc_id) {
1381 const struct recirc_id_node *recirc_id_node;
1382
1383 recirc_id_node = recirc_id_node_find(flow->recirc_id);
1384
1385 if (OVS_UNLIKELY(!recirc_id_node)) {
1386 return NULL;
1387 }
1388
1389 /* If recirculation was initiated due to bond (in_port = OFPP_NONE)
1390 * then frozen state is static and xport_uuid is not defined, so xport
1391 * cannot be restored from frozen state. */
1392 if (recirc_id_node->state.metadata.in_port != OFPP_NONE) {
1393 struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
1394 xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
1395 if (xport && xport->xbridge && xport->xbridge->ofproto) {
1396 goto out;
1397 }
1398 }
1399 }
1400
e672ff9b
JR
1401 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1402 ? tnl_port_receive(flow)
1403 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1404 if (OVS_UNLIKELY(!xport)) {
1405 return NULL;
ef377a58 1406 }
43e73536
ZB
1407
1408out:
e672ff9b 1409 *xportp = xport;
f9038ef6 1410 if (ofp_in_port) {
e672ff9b 1411 *ofp_in_port = xport->ofp_port;
f9038ef6 1412 }
e672ff9b 1413 return xport->xbridge->ofproto;
ef377a58
JR
1414}
1415
1416/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
1417 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1418struct ofproto_dpif *
1419xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1420 ofp_port_t *ofp_in_port)
1421{
1422 const struct xport *xport;
1423
1424 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1425}
1426
cc377352 1427/* Given a datapath and flow metadata ('backer', and 'flow' respectively),
bfc691bb 1428 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
cc377352 1429 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
dcc2c6cd
JR
1430 * handles for those protocols if they're enabled. Caller may use the returned
1431 * pointers until quiescing, for longer term use additional references must
1432 * be taken.
8449c4d6 1433 *
f9038ef6 1434 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
ef377a58 1435 */
8449c4d6 1436int
5c476ea3
JR
1437xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1438 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1439 struct dpif_sflow **sflow, struct netflow **netflow,
1440 ofp_port_t *ofp_in_port)
8449c4d6 1441{
ef377a58 1442 struct ofproto_dpif *ofproto;
84f0f298 1443 const struct xport *xport;
8449c4d6 1444
ef377a58 1445 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
8449c4d6 1446
f9038ef6 1447 if (!ofproto) {
cc377352 1448 return ENODEV;
8449c4d6 1449 }
8449c4d6 1450
ef377a58
JR
1451 if (ofprotop) {
1452 *ofprotop = ofproto;
8449c4d6
EJ
1453 }
1454
1dfdb9b3 1455 if (ipfix) {
f9038ef6 1456 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1dfdb9b3
EJ
1457 }
1458
1459 if (sflow) {
f9038ef6 1460 *sflow = xport ? xport->xbridge->sflow : NULL;
1dfdb9b3
EJ
1461 }
1462
1463 if (netflow) {
f9038ef6 1464 *netflow = xport ? xport->xbridge->netflow : NULL;
1dfdb9b3 1465 }
f9038ef6 1466
cc377352 1467 return 0;
8449c4d6
EJ
1468}
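/* Illustrative sketch (added; not part of the original file, guarded out):
 * how a caller such as an upcall handler might use xlate_lookup().  The
 * 'backer' and 'flow' here are assumed to come from a received upcall;
 * error handling follows the ENODEV contract documented above. */
#if 0
static int
example_handle_upcall(const struct dpif_backer *backer,
                      const struct flow *flow)
{
    struct ofproto_dpif *ofproto;
    struct dpif_ipfix *ipfix;
    struct dpif_sflow *sflow;
    struct netflow *netflow;
    ofp_port_t ofp_in_port;

    int error = xlate_lookup(backer, flow, &ofproto, &ipfix, &sflow,
                             &netflow, &ofp_in_port);
    if (error) {
        return error;   /* ENODEV: flow has no associated ofproto. */
    }

    /* 'ofproto' and the protocol handles are safe to use until quiescing;
     * take references for longer-term use. */
    return 0;
}
#endif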

static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbridge *
xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
    struct xbridge *xbridge;

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
    struct hmap *xbundles;
    struct xbundle *xbundle;

    if (!ofbundle || !xcfg) {
        return NULL;
    }

    xbundles = &xcfg->xbundles;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
    struct hmap *xports;
    struct xport *xport;

    if (!ofport || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
    struct hmap *xports;
    struct xport *xport;

    if (uuid_is_zero(uuid) || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports_uuid;

    HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
        if (uuid_equals(&xport->uuid, uuid)) {
            return xport;
        }
    }
    return NULL;
}

static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
    return xport->xbridge->stp && xport->stp_port_no != -1
           ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
           : NULL;
}

static bool
xport_stp_learn_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
           ? stp_learn_in_state(stp_port_get_state(sp))
           : true;
}

static bool
xport_stp_forward_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
           ? stp_forward_in_state(stp_port_get_state(sp))
           : true;
}

static bool
xport_stp_should_forward_bpdu(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return is_stp(flow);
}

static void
stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, dp_packet_data(&payload),
                          dp_packet_size(&payload));
    }
}
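/* Worked example (added for illustration): STP BPDUs are 802.3 frames, so
 * eth->eth_type holds a payload length rather than an Ethertype.  For a
 * config BPDU with length field 38 (3-byte LLC header plus a 35-byte BPDU)
 * that arrived padded to the 60-byte Ethernet minimum, the trim above
 * shrinks the packet to 38 + ETH_HEADER_LEN (14) = 52 bytes, and the pull of
 * ETH_HEADER_LEN + LLC_HEADER_LEN (17) then leaves exactly the 35-byte BPDU
 * for stp_received_bpdu(). */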

static enum rstp_state
xport_get_rstp_port_state(const struct xport *xport)
{
    return xport->rstp_port
           ? rstp_port_get_state(xport->rstp_port)
           : RSTP_DISABLED;
}

static bool
xport_rstp_learn_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
           ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
           : true;
}

static bool
xport_rstp_forward_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
           ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
           : true;
}

static bool
xport_rstp_should_manage_bpdu(const struct xport *xport)
{
    return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
}

static void
rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have no RSTP. */
    if (!xport->rstp_port) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
                                dp_packet_size(&payload));
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    return xport && xport->may_enable;
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                        int depth);

static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
    struct group_dpif *group;

    group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
                              ctx->xin->tables_version, false);
    if (group) {
        return group_first_live_bucket(ctx, group, depth) != NULL;
    }

    return false;
}

#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */

static bool
bucket_is_alive(const struct xlate_ctx *ctx,
                struct ofputil_bucket *bucket, int depth)
{
    if (depth >= MAX_LIVENESS_RECURSION) {
        xlate_report_error(ctx, "bucket chaining exceeded %d links",
                           MAX_LIVENESS_RECURSION);
        return false;
    }

    return (!ofputil_bucket_has_liveness(bucket)
            || (bucket->watch_port != OFPP_ANY
                && odp_port_is_alive(ctx, bucket->watch_port))
            || (bucket->watch_group != OFPG_ANY
                && group_is_alive(ctx, bucket->watch_group, depth + 1)));
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group, int depth)
{
    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        if (bucket_is_alive(ctx, bucket, depth)) {
            return bucket;
        }
    }

    return NULL;
}

static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
                       const struct group_dpif *group,
                       uint32_t basis)
{
    struct ofputil_bucket *best_bucket = NULL;
    uint32_t best_score = 0;

    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score =
                (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
    }

    return best_bucket;
}
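/* Worked example (added for illustration; not part of the original file):
 * group_best_live_bucket() scores each live bucket as
 *     score = (hash_int(bucket_id, basis) & 0xffff) * weight,
 * so for a fixed 'basis' a given flow always lands on the same bucket, and
 * a bucket with twice the weight wins roughly twice as often across flows.
 * E.g. with buckets {id 1, weight 1} and {id 2, weight 2}, a 16-bit hash of
 * 0x8000 for both gives scores 0x8000 and 0x10000, selecting bucket 2.
 * A minimal standalone model of the selection loop, reusing OVS's
 * hash_int(), guarded out: */
#if 0
static uint32_t
example_select_bucket(const uint32_t *bucket_ids, const uint16_t *weights,
                      size_t n, uint32_t basis)
{
    uint32_t best_id = 0;
    uint32_t best_score = 0;

    for (size_t i = 0; i < n; i++) {
        uint32_t score = (hash_int(bucket_ids[i], basis) & 0xffff)
                         * weights[i];
        if (score >= best_score) {  /* Ties go to the later bucket, as in
                                     * group_best_live_bucket(). */
            best_id = bucket_ids[i];
            best_score = score;
        }
    }
    return best_id;
}
#endif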

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
{
    switch (xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);

    case PORT_VLAN_DOT1Q_TUNNEL:
        return xvlan->v[0].vid == xbundle->vlan &&
               xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);

    default:
        OVS_NOT_REACHED();
    }
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static struct xbundle *
lookup_input_bundle__(const struct xbridge *xbridge,
                      ofp_port_t in_port, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
     * which a controller may use as the ingress port for traffic that
     * it is sourcing. */
    if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }
    return NULL;
}

static struct xbundle *
lookup_input_bundle(const struct xlate_ctx *ctx,
                    ofp_port_t in_port, struct xport **in_xportp)
{
    struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
                                                    in_port, in_xportp);
    if (!xbundle) {
        /* Odd.  A few possible reasons here:
         *
         * - We deleted a port but there are still a few packets queued up
         *   from it.
         *
         * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
         *   we don't know about.
         *
         * - The ofproto client didn't configure the port as part of a bundle.
         *   This is particularly likely to happen if a packet was received on
         *   the port after it was created, but before the client had a chance
         *   to configure its bundle.
         */
        xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
                           in_port);
    }
    return xbundle;
}

/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
 * given the packet is ingressing or egressing on 'xbundle', which has ingress
 * or egress (as appropriate) mirrors 'mirrors'. */
static void
mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
              mirror_mask_t mirrors)
{
    struct xvlan in_xvlan;
    struct xvlan xvlan;

    /* Figure out what VLAN the packet is in (because mirrors can select
     * packets on basis of VLAN). */
    xvlan_extract(&ctx->xin->flow, &in_xvlan);
    if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
        return;
    }
    xvlan_input_translate(xbundle, &in_xvlan, &xvlan);

    const struct xbridge *xbridge = ctx->xbridge;

    /* Don't mirror to destinations that we've already mirrored to. */
    mirrors &= ~ctx->mirrors;
    if (!mirrors) {
        return;
    }

    if (ctx->xin->resubmit_stats) {
        mirror_update_stats(xbridge->mbridge, mirrors,
                            ctx->xin->resubmit_stats->n_packets,
                            ctx->xin->resubmit_stats->n_bytes);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
        entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
        entry->mirror.mirrors = mirrors;
    }

    /* 'mirrors' is a bit-mask of candidates for mirroring.  Iterate as long
     * as some candidates remain. */
    while (mirrors) {
        const unsigned long *vlans;
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        int out_vlan;
        int snaplen;

        /* Get the details of the mirror represented by the rightmost 1-bit. */
        bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                     &vlans, &dup_mirrors,
                                     &out, &snaplen, &out_vlan);
        ovs_assert(has_mirror);

        /* If this mirror selects on the basis of VLAN, and it does not select
         * 'vlan', then discard this mirror and go on to the next one. */
        if (vlans) {
            ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        /* Record the mirror, and the mirrors that output to the same
         * destination, so that we don't mirror to them again.  This must be
         * done now to ensure that output_normal(), below, doesn't recursively
         * output to the same mirrors. */
        ctx->mirrors |= dup_mirrors;
        ctx->mirror_snaplen = snaplen;

        /* Send the packet to the mirror. */
        if (out) {
            struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, &xvlan);
            }
        } else if (xvlan.v[0].vid != out_vlan
                   && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
            struct xbundle *xb;
            uint16_t old_vid = xvlan.v[0].vid;

            xvlan.v[0].vid = out_vlan;
            LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xb, &xvlan)
                    && !xbundle_mirror_out(xbridge, xb)) {
                    output_normal(ctx, xb, &xvlan);
                }
            }
            xvlan.v[0].vid = old_vid;
        }

        /* output_normal() could have recursively output (to different
         * mirrors), so make sure that we don't send duplicates. */
        mirrors &= ~ctx->mirrors;
        ctx->mirror_snaplen = 0;
    }
}

static void
mirror_ingress_packet(struct xlate_ctx *ctx)
{
    if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
        struct xbundle *xbundle = lookup_input_bundle(
            ctx, ctx->xin->flow.in_port.ofp_port, NULL);
        if (xbundle) {
            mirror_packet(ctx, xbundle,
                          xbundle_mirror_src(ctx->xbridge, xbundle));
        }
    }
}
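/* Illustrative sketch (added; not part of the original file, guarded out):
 * mirror_packet() walks the 'mirrors' bitmask one rightmost 1-bit at a time.
 * raw_ctz() yields the index of that bit (the mirror number), and the mask
 * is pruned either with zero_rightmost_1bit() or by clearing every mirror
 * already visited ('mirrors &= ~ctx->mirrors').  For a mask of 0b01101 the
 * loop visits mirrors 0, 2, and 3.  The bare pattern: */
#if 0
static void
example_visit_mirrors(uint32_t mirrors)
{
    while (mirrors) {
        int mirror_idx = raw_ctz(mirrors);      /* Index of rightmost 1-bit. */
        /* ... mirror the packet to mirror 'mirror_idx' ... */
        mirrors = zero_rightmost_1bit(mirrors); /* Clear it and continue. */
    }
}
#endif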

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(const struct xlate_ctx *ctx,
                   uint16_t vid, struct xbundle *in_xbundle)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
                               "packet received on port %s configured as VLAN "
                               "%d access port", vid, in_xbundle->name,
                               in_xbundle->vlan);
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_trunks_vlan(in_xbundle, vid)) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
                               "received on port %s not configured for "
                               "trunking VLAN %"PRIu16,
                               vid, in_xbundle->name, vid);
            return false;
        }
        return true;

    case PORT_VLAN_DOT1Q_TUNNEL:
        if (!xbundle_allows_cvlan(in_xbundle, vid)) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
                               "on dot1q-tunnel port %s that excludes this "
                               "VLAN", vid, in_xbundle->name);
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }
}

static void
xvlan_copy(struct xvlan *dst, const struct xvlan *src)
{
    *dst = *src;
}

static void
xvlan_pop(struct xvlan *src)
{
    memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
    memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
           sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
}

static void
xvlan_push_uninit(struct xvlan *src)
{
    memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
    memset(&src->v[0], 0, sizeof(src->v[0]));
}

/* Extracts VLAN information (headers) from 'flow'. */
static void
xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
{
    int i;
    memset(xvlan, 0, sizeof(*xvlan));
    for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
        if (!eth_type_vlan(flow->vlans[i].tpid) ||
            !(flow->vlans[i].tci & htons(VLAN_CFI))) {
            break;
        }
        xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
        xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
        xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
    }
}

/* Puts VLAN information (headers) into 'flow'. */
static void
xvlan_put(struct flow *flow, const struct xvlan *xvlan)
{
    ovs_be16 tci;
    int i;
    for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
        tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
        if (tci) {
            tci |= htons(VLAN_CFI);
            flow->vlans[i].tpid = xvlan->v[i].tpid ?
                                  htons(xvlan->v[i].tpid) :
                                  htons(ETH_TYPE_VLAN_8021Q);
        }
        flow->vlans[i].tci = tci;
    }
}
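/* Worked example (added for illustration): an 802.1Q TCI packs
 *     PCP (3 bits) | CFI (1 bit) | VID (12 bits).
 * xvlan_extract() keeps the PCP bits in place (masked with VLAN_PCP_MASK,
 * 0xe000), so priority 3 is stored as 0x6000.  For vid = 100 (0x064) and
 * that pcp, xvlan_put() rebuilds
 *     tci = 0x064 | 0x6000 = 0x6064, then ORs in VLAN_CFI (0x1000) -> 0x7064.
 * A tci of zero means "no 802.1Q header at this level", which is why the
 * tpid is only written when tci is nonzero. */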

/* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
 * returns the VLANs of the packet during bridge internal processing. */
static void
xvlan_input_translate(const struct xbundle *in_xbundle,
                      const struct xvlan *in_xvlan, struct xvlan *xvlan)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        memset(xvlan, 0, sizeof(*xvlan));
        xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
                                                 ETH_TYPE_VLAN_8021Q;
        xvlan->v[0].vid = in_xbundle->vlan;
        xvlan->v[0].pcp = in_xvlan->v[0].pcp;
        break;

    case PORT_VLAN_TRUNK:
        xvlan_copy(xvlan, in_xvlan);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        xvlan_copy(xvlan, in_xvlan);
        if (!in_xvlan->v[0].vid) {
            xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
                                                     ETH_TYPE_VLAN_8021Q;
            xvlan->v[0].vid = in_xbundle->vlan;
            xvlan->v[0].pcp = in_xvlan->v[0].pcp;
        }
        break;

    case PORT_VLAN_DOT1Q_TUNNEL:
        xvlan_copy(xvlan, in_xvlan);
        xvlan_push_uninit(xvlan);
        xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
        xvlan->v[0].vid = in_xbundle->vlan;
        xvlan->v[0].pcp = 0;
        break;

    default:
        OVS_NOT_REACHED();
    }
}

/* Given 'xvlan', the VLANs of a packet during internal processing, and
 * 'out_xbundle', a bundle on which the packet is to be output, returns the
 * VLANs that should be included in the output packet. */
static void
xvlan_output_translate(const struct xbundle *out_xbundle,
                       const struct xvlan *xvlan, struct xvlan *out_xvlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        memset(out_xvlan, 0, sizeof(*out_xvlan));
        break;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        xvlan_copy(out_xvlan, xvlan);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
        xvlan_copy(out_xvlan, xvlan);
        if (xvlan->v[0].vid == out_xbundle->vlan) {
            xvlan_pop(out_xvlan);
        }
        break;

    case PORT_VLAN_DOT1Q_TUNNEL:
        xvlan_copy(out_xvlan, xvlan);
        xvlan_pop(out_xvlan);
        break;

    default:
        OVS_NOT_REACHED();
    }
}
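/* Worked example (added for illustration): on a dot1q-tunnel bundle with
 * qinq_ethtype 0x88a8 and vlan 1000, a frame arriving with customer tag
 * {tpid 0x8100, vid 20} is translated on input to the stack
 *     v[0] = {tpid 0x88a8, vid 1000, pcp 0}   (new service tag)
 *     v[1] = {tpid 0x8100, vid 20}            (original customer tag)
 * by xvlan_push_uninit() plus the assignments above; on output through the
 * same kind of bundle, xvlan_output_translate() pops v[0] again, restoring
 * the single customer tag. */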

/* If the output xbundle is a dot1q-tunnel, sets the mask bits of cvlan. */
static void
check_and_set_cvlan_mask(struct flow_wildcards *wc,
                         const struct xbundle *xbundle)
{
    if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
        wc->masks.vlans[1].tci = htons(0xffff);
    }
}

static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              const struct xvlan *xvlan)
{
    uint16_t vid;
    union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
    struct xport *xport;
    struct xlate_bond_recirc xr;
    bool use_recirc = false;
    struct xvlan out_xvlan;

    check_and_set_cvlan_mask(ctx->wc, out_xbundle);

    xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
    if (out_xbundle->use_priority_tags) {
        out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
                             VLAN_PCP_MASK;
    }
    vid = out_xvlan.v[0].vid;
    if (ovs_list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct flow_wildcards *wc = ctx->wc;
        struct ofport_dpif *ofport;

        if (ctx->xbridge->support.odp.recirc) {
            /* In case recirculation is not actually in use, 'xr.recirc_id'
             * will be set to '0', since a valid 'recirc_id' cannot be
             * zero. */
            bond_update_post_recirc_rules(out_xbundle->bond,
                                          &xr.recirc_id,
                                          &xr.hash_basis);
            if (xr.recirc_id) {
                /* Use recirculation instead of output. */
                use_recirc = true;
                xr.hash_alg = OVS_HASH_ALG_L4;
                /* Recirculation does not require unmasking hash fields. */
                wc = NULL;
            }
        }

        ofport = bond_choose_output_slave(out_xbundle->bond,
                                          &ctx->xin->flow, wc, vid);
        xport = xport_lookup(ctx->xcfg, ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }

        /* If use_recirc is set, the main thread will handle stats
         * accounting for this bond. */
        if (!use_recirc) {
            if (ctx->xin->resubmit_stats) {
                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
                             ctx->xin->resubmit_stats->n_bytes);
            }
            if (ctx->xin->xcache) {
                struct xc_entry *entry;
                struct flow *flow;

                flow = &ctx->xin->flow;
                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
                entry->bond.bond = bond_ref(out_xbundle->bond);
                entry->bond.flow = xmemdup(flow, sizeof *flow);
                entry->bond.vid = vid;
            }
        }
    }

    memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
    xvlan_put(&ctx->xin->flow, &out_xvlan);

    compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
                          false, false);
    memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
}
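/* Note (added for illustration): with datapath recirculation support, bond
 * output above is split into two passes.  The first translation emits a
 * datapath hash action plus recirc(xr.recirc_id) instead of an immediate
 * output; after the datapath computes the hash, translation resumes with
 * flow->recirc_id set, and the post-recirculation rules installed by
 * bond_update_post_recirc_rules() pick the bond member.  A recirc_id of
 * zero doubles as "recirculation not in use", which is why a valid id is
 * never zero. */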

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}
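/* Example (added for illustration): a gratuitous ARP request announcing
 * 10.0.0.5 has dl_dst ff:ff:ff:ff:ff:ff, opcode ARP_OP_REQUEST, and
 * nw_src == nw_dst == 10.0.0.5, so the nw_src == nw_dst test above accepts
 * it; an ordinary ARP request for someone else's address has
 * nw_src != nw_dst and is rejected. */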

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, OFT_DETAIL,
                     "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, OFT_DETAIL,
                         "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac
                && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
                && (!is_gratuitous_arp(flow, ctx->wc)
                    || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, OFT_DETAIL,
                             "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}

static bool
update_learning_table__(const struct xbridge *xbridge,
                        struct xbundle *in_xbundle, struct eth_addr dl_src,
                        int vlan, bool is_grat_arp)
{
    return (in_xbundle == &ofpp_none_bundle
            || !mac_learning_update(xbridge->ml, dl_src, vlan,
                                    is_grat_arp,
                                    in_xbundle->bond != NULL,
                                    in_xbundle->ofbundle));
}

static void
update_learning_table(const struct xlate_ctx *ctx,
                      struct xbundle *in_xbundle, struct eth_addr dl_src,
                      int vlan, bool is_grat_arp)
{
    if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
                                 is_grat_arp)) {
        xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
                           "on port %s in VLAN %d",
                           ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
    }
}

/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
                               const struct flow *flow,
                               struct mcast_snooping *ms, int vlan,
                               struct xbundle *in_xbundle,
                               const struct dp_packet *packet)
    OVS_REQ_WRLOCK(ms->rwlock)
{
    const struct igmp_header *igmp;
    int count;
    size_t offset;
    ovs_be32 ip4 = flow->igmp_group_ip4;

    offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
    igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
    if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
        xlate_report_debug(ctx, OFT_DETAIL,
                           "multicast snooping received bad IGMP "
                           "checksum on port %s in VLAN %d",
                           in_xbundle->name, vlan);
        return;
    }

    switch (ntohs(flow->tp_src)) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL,
                               "multicast snooping learned that "
                               IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
                               IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
                                                       in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
                               "from "IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
        }
        break;
    case IGMPV3_HOST_MEMBERSHIP_REPORT:
        count = mcast_snooping_add_report(ms, packet, vlan,
                                          in_xbundle->ofbundle);
        if (count) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
                               "%d addresses on port %s in VLAN %d",
                               count, in_xbundle->name, vlan);
        }
        break;
    }
}
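/* Note (added for illustration): for IGMP, OVS's flow extraction stores the
 * IGMP message type in flow->tp_src, which is what the switch above
 * dispatches on.  E.g. an IGMPv2 membership report (type 0x16) arrives with
 * ntohs(flow->tp_src) == IGMPV2_HOST_MEMBERSHIP_REPORT and is fed to
 * mcast_snooping_add_group4() with the group address taken from
 * flow->igmp_group_ip4. */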

static void
update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
                               const struct flow *flow,
                               struct mcast_snooping *ms, int vlan,
                               struct xbundle *in_xbundle,
                               const struct dp_packet *packet)
    OVS_REQ_WRLOCK(ms->rwlock)
{
    const struct mld_header *mld;
    int count;
    size_t offset;

    offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
    mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);

    if (!mld ||
        packet_csum_upperlayer6(dp_packet_l3(packet),
                                mld, IPPROTO_ICMPV6,
                                dp_packet_l4_size(packet)) != 0) {
        xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
                           "bad MLD checksum on port %s in VLAN %d",
                           in_xbundle->name, vlan);
        return;
    }

    switch (ntohs(flow->tp_src)) {
    case MLD_QUERY:
        if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
            && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
                               "port %s in VLAN %d", in_xbundle->name, vlan);
        }
        break;
    case MLD_REPORT:
    case MLD_DONE:
    case MLD2_REPORT:
        count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
        if (count) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
                               "%d addresses on port %s in VLAN %d",
                               count, in_xbundle->name, vlan);
        }
        break;
    }
}

/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan'. */
static void
update_mcast_snooping_table(const struct xlate_ctx *ctx,
                            const struct flow *flow, int vlan,
                            struct xbundle *in_xbundle,
                            const struct dp_packet *packet)
{
    struct mcast_snooping *ms = ctx->xbridge->ms;
    struct xbundle *mcast_xbundle;
    struct mcast_port_bundle *fport;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* Don't learn from flood ports. */
    mcast_xbundle = NULL;
    ovs_rwlock_wrlock(&ms->rwlock);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
        if (mcast_xbundle == in_xbundle) {
            break;
        }
    }

    if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            update_mcast_snooping_table4__(ctx, flow, ms, vlan,
                                           in_xbundle, packet);
        } else {
            update_mcast_snooping_table6__(ctx, flow, ms, vlan,
                                           in_xbundle, packet);
        }
    }
    ovs_rwlock_unlock(&ms->rwlock);
}

/* Sends the packet to the ports on which the multicast group was learned. */
static void
xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
                              struct mcast_snooping *ms OVS_UNUSED,
                              struct mcast_group *grp,
                              struct xbundle *in_xbundle,
                              const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct mcast_group_bundle *b;
    struct xbundle *mcast_xbundle;

    LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
        mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast group port is unknown, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast group port is input port, dropping");
        }
    }
}

/* Sends the packet to ports connected to multicast routers. */
static void
xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
                                 struct mcast_snooping *ms,
                                 struct xbundle *in_xbundle,
                                 const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct mcast_mrouter_bundle *mrouter;
    struct xbundle *mcast_xbundle;

    LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
        mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle
            && mrouter->vlan == xvlan->v[0].vid) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast router port is unknown, dropping");
        } else if (mrouter->vlan != xvlan->v[0].vid) {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast router is on another vlan, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast router port is input port, dropping");
        }
    }
}

/* Sends the packet to ports flagged to be flooded. */
static void
xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle,
                               const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct mcast_port_bundle *fport;
    struct xbundle *mcast_xbundle;

    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast flood port is unknown, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast flood port is input port, dropping");
        }
    }
}

/* Forwards the IGMP/MLD Reports to ports configured for them. */
static void
xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle,
                               const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct mcast_port_bundle *rport;
    struct xbundle *mcast_xbundle;

    LIST_FOR_EACH(rport, node, &ms->rport_list) {
        mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
        if (mcast_xbundle
            && mcast_xbundle != in_xbundle
            && mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
            xlate_report(ctx, OFT_DETAIL,
                         "forwarding report to mcast flagged port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast port is unknown, dropping the report");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast port is input port, dropping the report");
        }
    }
}

static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
                   struct xvlan *xvlan)
{
    struct xbundle *xbundle;

    LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
        if (xbundle != in_xbundle
            && xbundle->ofbundle != in_xbundle->ofbundle
            && xbundle_includes_vlan(xbundle, xvlan)
            && xbundle->floodable
            && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
            output_normal(ctx, xbundle, xvlan);
        }
    }
    ctx->nf_output_iface = NF_OUT_FLOOD;
}

static bool
is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        return ip_is_local_multicast(flow->nw_dst);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        return ipv6_is_all_hosts(&flow->ipv6_dst);
    } else {
        return false;
    }
}

static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = ctx->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    void *mac_port;
    struct xvlan in_xvlan;
    struct xvlan xvlan;
    uint16_t vlan;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (eth_type_vlan(flow->dl_type) &&
        !(flow->vlans[0].tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet with partial "
                               "VLAN tag received on port %s",
                               in_xbundle->name);
        }
        xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet received on port %s, "
                               "which is reserved exclusively for mirroring",
                               in_xbundle->name);
        }
        xlate_report(ctx, OFT_WARN,
                     "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    xvlan_extract(flow, &in_xvlan);
    if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
        xlate_report(ctx, OFT_WARN,
                     "disallowed VLAN VID for this input port, dropping");
        return;
    }
    xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
    vlan = xvlan.v[0].vid;

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    bool is_grat_arp = is_gratuitous_arp(flow, wc);
    if (ctx->xin->allow_side_effects
        && flow->packet_type == htonl(PT_ETH)
        && in_port->pt_mode != NETDEV_PT_LEGACY_L3) {
        update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
                              is_grat_arp);
    }
    if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
        struct xc_entry *entry;

        /* Save just enough info to update mac learning table later. */
        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
        entry->normal.ofproto = ctx->xbridge->ofproto;
        entry->normal.in_port = flow->in_port.ofp_port;
        entry->normal.dl_src = flow->dl_src;
        entry->normal.vlan = vlan;
        entry->normal.is_gratuitous_arp = is_grat_arp;
    }

    /* Determine output bundle. */
    if (mcast_snooping_enabled(ctx->xbridge->ms)
        && !eth_addr_is_broadcast(flow->dl_dst)
        && eth_addr_is_multicast(flow->dl_dst)
        && is_ip_any(flow)) {
        struct mcast_snooping *ms = ctx->xbridge->ms;
        struct mcast_group *grp = NULL;

        if (is_igmp(flow, wc)) {
            /* IGMP packets need to take the slow path, in order to be
             * processed for mdb updates.  That will prevent expires
             * firing off even after hosts have sent reports. */
            ctx->xout->slow |= SLOW_ACTION;

            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
            if (mcast_snooping_is_membership(flow->tp_src) ||
                mcast_snooping_is_query(flow->tp_src)) {
                if (ctx->xin->allow_side_effects && ctx->xin->packet) {
                    update_mcast_snooping_table(ctx, flow, vlan,
                                                in_xbundle, ctx->xin->packet);
                }
            }

            if (mcast_snooping_is_membership(flow->tp_src)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                /* RFC4541: section 2.1.1, item 1: A snooping switch should
                 * forward IGMP Membership Reports only to those ports where
                 * multicast routers are attached.  Alternatively stated: a
                 * snooping switch should not forward IGMP Membership Reports
                 * to ports on which only hosts are attached.
                 * An administrative control may be provided to override this
                 * restriction, allowing the report messages to be flooded to
                 * other ports. */
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            }
            return;
        } else if (is_mld(flow, wc)) {
            ctx->xout->slow |= SLOW_ACTION;
            if (ctx->xin->allow_side_effects && ctx->xin->packet) {
                update_mcast_snooping_table(ctx, flow, vlan,
                                            in_xbundle, ctx->xin->packet);
            }
            if (is_mld_report(flow, wc)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            }
        } else {
            if (is_ip_local_multicast(flow, wc)) {
                /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
                 * address in the 224.0.0.x range which are not IGMP must
                 * be forwarded on all ports. */
                xlate_report(ctx, OFT_DETAIL,
                             "RFC4541: section 2.1.2, item 2, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
                return;
            }
        }

        /* Forward to the ports on which the group was learned. */
        ovs_rwlock_rdlock(&ms->rwlock);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
        }
        if (grp) {
            xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &xvlan);
            xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
            xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
        } else {
            if (mcast_snooping_flood_unreg(ms)) {
                xlate_report(ctx, OFT_DETAIL,
                             "unregistered multicast, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            } else {
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
            }
        }
        ovs_rwlock_unlock(&ms->rwlock);
    } else {
        ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
        mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
        mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
        ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

        if (mac_port) {
            struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
            if (mac_xbundle
                && mac_xbundle != in_xbundle
                && mac_xbundle->ofbundle != in_xbundle->ofbundle) {
                xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
                output_normal(ctx, mac_xbundle, &xvlan);
            } else if (!mac_xbundle) {
                xlate_report(ctx, OFT_WARN,
                             "learned port is unknown, dropping");
            } else {
                xlate_report(ctx, OFT_DETAIL,
                             "learned port is input port, dropping");
            }
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "no learned MAC for destination, flooding");
            xlate_normal_flood(ctx, in_xbundle, &xvlan);
        }
    }
}
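/* Overview (added for illustration) of the NORMAL action above, in order:
 *   1. Find the input bundle; drop malformed or mirror-reserved frames.
 *   2. Validate the input VID and translate it to the internal VLAN stack.
 *   3. Apply admissibility checks (reserved MACs, bond loop suppression).
 *   4. Learn the source MAC, immediately or via the xlate cache.
 *   5. If multicast snooping applies, forward per the IGMP/MLD tables.
 *   6. Otherwise forward to the port learned for dl_dst, or flood. */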

/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'.  The
 * 'probability' is the number of packets out of UINT32_MAX to sample.  The
 * 'cookie' is passed back in the callback for each sampled packet.
 * 'tunnel_out_port', if not ODPP_NONE, is added as the
 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute.  If 'include_actions',
 * an OVS_USERSPACE_ATTR_ACTIONS attribute is added.  If
 * 'emit_set_tunnel', sample(sampling_port=1) would translate into
 * datapath sample action set(tunnel(...)), sample(...) and it is used
 * for sampling egress tunnel information.
 */
static size_t
compose_sample_action(struct xlate_ctx *ctx,
                      const uint32_t probability,
                      const struct user_action_cookie *cookie,
                      const odp_port_t tunnel_out_port,
                      bool include_actions)
{
    if (probability == 0) {
        /* No need to generate sampling or the inner action. */
        return 0;
    }

    /* If the slow path meter is configured by the controller,
     * insert a meter action before the user space action. */
    struct ofproto *ofproto = &ctx->xin->ofproto->up;
    uint32_t meter_id = ofproto->slowpath_meter_id;

    /* When a meter action is not required, avoid generating a sample action
     * for a 100% sampling rate. */
    bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
    size_t sample_offset, actions_offset;
    if (is_sample) {
        sample_offset = nl_msg_start_nested(ctx->odp_actions,
                                            OVS_ACTION_ATTR_SAMPLE);
        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
                       probability);
        actions_offset = nl_msg_start_nested(ctx->odp_actions,
                                             OVS_SAMPLE_ATTR_ACTIONS);
    }

    if (meter_id != UINT32_MAX) {
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
    }

    odp_port_t odp_port = ofp_port_to_odp_port(
        ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
    uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
                                     flow_hash_5tuple(&ctx->xin->flow, 0));
    int cookie_offset = odp_put_userspace_action(pid, cookie, sizeof *cookie,
                                                 tunnel_out_port,
                                                 include_actions,
                                                 ctx->odp_actions);

    if (is_sample) {
        nl_msg_end_nested(ctx->odp_actions, actions_offset);
        nl_msg_end_nested(ctx->odp_actions, sample_offset);
    }

    return cookie_offset;
}
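/* Illustrative sketch (added; not part of the original file): for a 50%
 * sampling rate with no slow-path meter configured, the netlink actions
 * emitted above nest as
 *
 *     sample(sample=50%, actions(userspace(pid=..., userdata=cookie, ...)))
 *
 * i.e. OVS_ACTION_ATTR_SAMPLE wrapping OVS_SAMPLE_ATTR_PROBABILITY
 * (UINT32_MAX / 2) and OVS_SAMPLE_ATTR_ACTIONS.  At 100% probability with no
 * meter, 'is_sample' is false, the wrapper is skipped, and the userspace
 * action is emitted bare. */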
2963
a6092018
BP
2964/* If sFLow is not enabled, returns 0 without doing anything.
2965 *
2966 * If sFlow is enabled, appends a template "sample" action to the ODP actions
2967 * in 'ctx'. This action is a template because some of the information needed
2968 * to fill it out is not available until flow translation is complete. In this
2969 * case, this functions returns an offset, which is always nonzero, to pass
2970 * later to fix_sflow_action() to fill in the rest of the template. */
9583bc14 2971static size_t
a6092018 2972compose_sflow_action(struct xlate_ctx *ctx)
9583bc14 2973{
a6092018
BP
2974 struct dpif_sflow *sflow = ctx->xbridge->sflow;
2975 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2976 return 0;
2977 }
2978
fcb9579b
JP
2979 struct user_action_cookie cookie = {
2980 .type = USER_ACTION_COOKIE_SFLOW,
2981 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
2982 .ofproto_uuid = ctx->xbridge->ofproto->uuid
2983 };
a6092018 2984 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
8de6ff3e 2985 &cookie, ODPP_NONE, true);
9583bc14
EJ
2986}
2987
f69f713b
BY
2988/* If flow IPFIX is enabled, make sure IPFIX flow sample action
2989 * at egress point of tunnel port is just in front of corresponding
2990 * output action. If bridge IPFIX is enabled, this appends an IPFIX
2991 * sample action to 'ctx->odp_actions'. */
9583bc14 2992static void
a6092018 2993compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
9583bc14 2994{
a6092018 2995 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
8b7ea2d4 2996 odp_port_t tunnel_out_port = ODPP_NONE;
9583bc14 2997
a6092018 2998 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2999 return;
3000 }
3001
8b7ea2d4
WZ
3002 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
3003 * number. */
3004 if (output_odp_port == ODPP_NONE &&
a6092018 3005 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
8b7ea2d4
WZ
3006 return;
3007 }
3008
f69f713b 3009 /* For output case, output_odp_port is valid. */
8b7ea2d4 3010 if (output_odp_port != ODPP_NONE) {
a6092018 3011 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
8b7ea2d4
WZ
3012 return;
3013 }
3014 /* If tunnel sampling is enabled, put an additional option attribute:
3015 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
3016 */
a6092018 3017 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
cd32509e 3018 dpif_ipfix_is_tunnel_port(ipfix, output_odp_port) ) {
8b7ea2d4
WZ
3019 tunnel_out_port = output_odp_port;
3020 }
3021 }
3022
8de6ff3e
JP
3023 struct user_action_cookie cookie = {
3024 .type = USER_ACTION_COOKIE_IPFIX,
fcb9579b
JP
3025 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
3026 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
3027 .ipfix.output_odp_port = output_odp_port
a6092018
BP
3028 };
3029 compose_sample_action(ctx,
3030 dpif_ipfix_get_bridge_exporter_probability(ipfix),
8de6ff3e 3031 &cookie, tunnel_out_port, false);
9583bc14
EJ
3032}
3033
a6092018
BP
3034/* Fix "sample" action according to data collected while composing ODP actions,
3035 * as described in compose_sflow_action().
3036 *
8de6ff3e
JP
3037 * 'user_cookie_offset' must be the offset returned by
3038 * compose_sflow_action(). */
9583bc14 3039static void
a6092018 3040fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
9583bc14
EJ
3041{
3042 const struct flow *base = &ctx->base_flow;
8de6ff3e 3043 struct user_action_cookie *cookie;
9583bc14 3044
8de6ff3e 3045 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
9583bc14
EJ
3046 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
3047
f0fb825a 3048 cookie->sflow.vlan_tci = base->vlans[0].tci;
a6092018
BP
3049
3050 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
3051 * port information") for the interpretation of cookie->output. */
3052 switch (ctx->sflow_n_outputs) {
3053 case 0:
3054 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3055 cookie->sflow.output = 0x40000000 | 256;
3056 break;
3057
3058 case 1:
3059 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3060 ctx->xbridge->sflow, ctx->sflow_odp_port);
3061 if (cookie->sflow.output) {
3062 break;
3063 }
3064 /* Fall through. */
3065 default:
3066 /* 0x80000000 means "multiple output ports". */
3067 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3068 break;
3069 }
9583bc14
EJ
3070}
3071
515793d5
BP
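/* Checks whether the packet in 'ctx' belongs to a protocol that OVS consumes
 * itself rather than forwarding: CFM, BFD, LACP, STP/RSTP, or LLDP. If so,
 * processes the packet (when one is actually present), marks the flow with
 * the matching slow-path reason, and returns true. Returns false if the
 * packet should go through normal flow translation. */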
3072static bool
3073process_special(struct xlate_ctx *ctx, const struct xport *xport)
db7d4e46 3074{
515793d5 3075 const struct flow *flow = &ctx->xin->flow;
49a73e0c 3076 struct flow_wildcards *wc = ctx->wc;
46c88433 3077 const struct xbridge *xbridge = ctx->xbridge;
515793d5
BP
3078 const struct dp_packet *packet = ctx->xin->packet;
3079 enum slow_path_reason slow;
642dc74d 3080
46c88433 3081 if (!xport) {
515793d5 3082 slow = 0;
46c88433 3083 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
db7d4e46 3084 if (packet) {
46c88433 3085 cfm_process_heartbeat(xport->cfm, packet);
db7d4e46 3086 }
515793d5 3087 slow = SLOW_CFM;
fab52e16 3088 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
db7d4e46 3089 if (packet) {
46c88433 3090 bfd_process_packet(xport->bfd, flow, packet);
60d02c72
AW
3091 /* If a POLL was received, immediately send a FINAL back. */
3092 if (bfd_should_send_packet(xport->bfd)) {
6d308b28 3093 ofproto_dpif_monitor_port_send_soon(xport->ofport);
60d02c72 3094 }
db7d4e46 3095 }
515793d5 3096 slow = SLOW_BFD;
46c88433 3097 } else if (xport->xbundle && xport->xbundle->lacp
db7d4e46
JP
3098 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3099 if (packet) {
46c88433 3100 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
db7d4e46 3101 }
515793d5 3102 slow = SLOW_LACP;
9efd308e
DV
3103 } else if ((xbridge->stp || xbridge->rstp) &&
3104 stp_should_process_flow(flow, wc)) {
db7d4e46 3105 if (packet) {
f025bcb7
JR
3106 xbridge->stp
3107 ? stp_process_packet(xport, packet)
3108 : rstp_process_packet(xport, packet);
db7d4e46 3109 }
515793d5 3110 slow = SLOW_STP;
19aef6ef 3111 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
0477baa9
DF
3112 if (packet) {
3113 lldp_process_packet(xport->lldp, packet);
3114 }
515793d5 3115 slow = SLOW_LLDP;
db7d4e46 3116 } else {
515793d5
BP
3117 slow = 0;
3118 }
3119
3120 if (slow) {
3121 ctx->xout->slow |= slow;
3122 return true;
3123 } else {
3124 return false;
db7d4e46
JP
3125 }
3126}
3127
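/* Looks up the route for the tunnel destination taken from 'oflow'. On
 * success, stores the next hop (the gateway if the route has one, otherwise
 * the destination itself) in '*ip', the local source address in '*src', and
 * the output bridge port in '*out_port', then returns 0. Returns -ENOENT if
 * no route or no matching xport is found. */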
a36de779 3128static int
68f515ca
HH
3129tnl_route_lookup_flow(const struct xlate_ctx *ctx,
3130 const struct flow *oflow,
a8704b50
PS
3131 struct in6_addr *ip, struct in6_addr *src,
3132 struct xport **out_port)
a36de779
PS
3133{
3134 char out_dev[IFNAMSIZ];
3135 struct xbridge *xbridge;
c2b878e0
TLSC
3136 struct in6_addr gw;
3137 struct in6_addr dst;
a36de779 3138
c2b878e0 3139 dst = flow_tnl_dst(&oflow->tunnel);
ed52ca57 3140 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
a36de779
PS
3141 return -ENOENT;
3142 }
3143
c2b878e0
TLSC
3144 if (ipv6_addr_is_set(&gw) &&
3145 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
a36de779
PS
3146 *ip = gw;
3147 } else {
c2b878e0 3148 *ip = dst;
a36de779
PS
3149 }
3150
68f515ca 3151 HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
a36de779
PS
3152 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3153 struct xport *port;
3154
3155 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3156 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3157 *out_port = port;
3158 return 0;
3159 }
3160 }
3161 }
3162 }
3163 return -ENOENT;
3164}
3165
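/* Re-injects 'packet' into the OpenFlow pipeline of 'out_dev''s bridge, as if
 * it had been received on that port, by translating an "output:TABLE" action
 * for it. Used to send the ARP/ND requests generated for native tunneling. */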
3166static int
cdd42eda
JG
3167compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3168 struct dp_packet *packet)
a36de779 3169{
cdd42eda 3170 struct xbridge *xbridge = out_dev->xbridge;
a36de779
PS
3171 struct ofpact_output output;
3172 struct flow flow;
3173
3174 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
cf62fa4c 3175 flow_extract(packet, &flow);
cdd42eda
JG
3176 flow.in_port.ofp_port = out_dev->ofp_port;
3177 output.port = OFPP_TABLE;
a36de779
PS
3178 output.max_len = 0;
3179
1f4a8933
JR
3180 return ofproto_dpif_execute_actions__(xbridge->ofproto,
3181 ctx->xin->tables_version, &flow,
3182 NULL, &output.ofpact, sizeof output,
2d9b49dd 3183 ctx->depth, ctx->resubmits, packet);
a36de779
PS
3184}
3185
c2b878e0
TLSC
3186static void
3187tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3188 const struct eth_addr eth_src,
3189 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3190{
3191 struct dp_packet packet;
3192
3193 dp_packet_init(&packet, 0);
16187903 3194 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
c2b878e0
TLSC
3195 compose_table_xlate(ctx, out_dev, &packet);
3196 dp_packet_uninit(&packet);
3197}
3198
a36de779 3199static void
cdd42eda 3200tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
74ff3298 3201 const struct eth_addr eth_src,
a36de779
PS
3202 ovs_be32 ip_src, ovs_be32 ip_dst)
3203{
cf62fa4c 3204 struct dp_packet packet;
a36de779 3205
cf62fa4c 3206 dp_packet_init(&packet, 0);
eb0b295e
BP
3207 compose_arp(&packet, ARP_OP_REQUEST,
3208 eth_src, eth_addr_zero, true, ip_src, ip_dst);
a36de779 3209
cdd42eda 3210 compose_table_xlate(ctx, out_dev, &packet);
cf62fa4c 3211 dp_packet_uninit(&packet);
a36de779
PS
3212}
3213
7c12dfc5
SC
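/* Rewrites the L2-L4 fields of 'dst_flow' to describe the outer header of the
 * tunneled packet: MAC addresses from 'dmac'/'smac'; addresses, TOS, TTL, and
 * ports copied from 'src_flow''s tunnel metadata; and 'nw_proto' chosen for
 * the tunnel type. 's_ip6'/'s_ip' fill in a source address left unset. */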
3214static void
3215propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3216 const struct flow *src_flow,
3217 struct eth_addr dmac, struct eth_addr smac,
3218 struct in6_addr s_ip6, ovs_be32 s_ip,
3219 bool is_tnl_ipv6, uint8_t nw_proto)
3220{
3221 dst_flow->dl_dst = dmac;
3222 dst_flow->dl_src = smac;
3223
3224 dst_flow->packet_type = htonl(PT_ETH);
3225 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3226 dst_flow->nw_src = src_flow->tunnel.ip_src;
3227 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3228 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3229
3230 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3231 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3232 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3233 dst_flow->tp_src = src_flow->tunnel.tp_src;
3234
3235 if (is_tnl_ipv6) {
3236 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3237 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3238 && !ipv6_mask_is_any(&s_ip6)) {
3239 dst_flow->ipv6_src = s_ip6;
3240 }
3241 } else {
3242 dst_flow->dl_type = htons(ETH_TYPE_IP);
3243 if (dst_flow->nw_src == 0 && s_ip) {
3244 dst_flow->nw_src = s_ip;
3245 }
3246 }
3247 dst_flow->nw_proto = nw_proto;
3248}
3249
3250/*
3251 * Populate the 'flow' and 'base_flow' L3 fields to do the post-tunnel-push
3252 * translations.
3253 */
3254static void
3255propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3256 struct eth_addr smac, struct in6_addr s_ip6,
3257 ovs_be32 s_ip, bool is_tnl_ipv6,
3258 enum ovs_vport_type tnl_type)
3259{
3260 struct flow *base_flow, *flow;
3261 flow = &ctx->xin->flow;
3262 base_flow = &ctx->base_flow;
3263 uint8_t nw_proto = 0;
3264
3265 switch (tnl_type) {
3266 case OVS_VPORT_TYPE_GRE:
3267 nw_proto = IPPROTO_GRE;
3268 break;
3269 case OVS_VPORT_TYPE_VXLAN:
3270 case OVS_VPORT_TYPE_GENEVE:
3271 nw_proto = IPPROTO_UDP;
3272 break;
3273 case OVS_VPORT_TYPE_LISP:
3274 case OVS_VPORT_TYPE_STT:
3275 case OVS_VPORT_TYPE_UNSPEC:
3276 case OVS_VPORT_TYPE_NETDEV:
3277 case OVS_VPORT_TYPE_INTERNAL:
3278 case __OVS_VPORT_TYPE_MAX:
3279 default:
3280 OVS_NOT_REACHED();
7c12dfc5
SC
3281 }
3282 /*
3283 * Update 'base_flow' first and 'flow' second, because 'flow' is the source
3284 * for both calls and is modified as the destination of the second call.
3285 */
3286 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3287 is_tnl_ipv6, nw_proto);
3288 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3289 is_tnl_ipv6, nw_proto);
3290}
3291
a36de779 3292static int
11938578
AZ
3293native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
3294 const struct flow *flow, odp_port_t tunnel_odp_port,
3295 bool truncate)
a36de779 3296{
4975aa3e 3297 struct netdev_tnl_build_header_params tnl_params;
a36de779
PS
3298 struct ovs_action_push_tnl tnl_push_data;
3299 struct xport *out_dev = NULL;
c2b878e0
TLSC
3300 ovs_be32 s_ip = 0, d_ip = 0;
3301 struct in6_addr s_ip6 = in6addr_any;
3302 struct in6_addr d_ip6 = in6addr_any;
74ff3298
JR
3303 struct eth_addr smac;
3304 struct eth_addr dmac;
a36de779 3305 int err;
c2b878e0
TLSC
3306 char buf_sip6[INET6_ADDRSTRLEN];
3307 char buf_dip6[INET6_ADDRSTRLEN];
a36de779 3308
245d88bc
ZB
3309 /* Store sFlow data. */
3310 uint32_t sflow_n_outputs = ctx->sflow_n_outputs;
3311
7c12dfc5
SC
3312 /* Structures to backup Ethernet and IP of base_flow. */
3313 struct flow old_base_flow;
3314 struct flow old_flow;
3315
3316 /* Backup flow & base_flow data. */
3317 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3318 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3319
68f515ca 3320 err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
a36de779 3321 if (err) {
2d9b49dd 3322 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
a36de779
PS
3323 return err;
3324 }
c2b878e0 3325
2d9b49dd 3326 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
c2b878e0
TLSC
3327 ipv6_string_mapped(buf_dip6, &d_ip6),
3328 netdev_get_name(out_dev->netdev));
a36de779
PS
3329
3330 /* Use the MAC address of the peer's bridge port. */
74ff3298 3331 err = netdev_get_etheraddr(out_dev->netdev, &smac);
a36de779 3332 if (err) {
2d9b49dd
BP
3333 xlate_report(ctx, OFT_WARN,
3334 "tunnel output device lacks Ethernet address");
a36de779
PS
3335 return err;
3336 }
3337
c2b878e0
TLSC
3338 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3339 if (d_ip) {
a8704b50 3340 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
a36de779
PS
3341 }
3342
c2b878e0 3343 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
a36de779 3344 if (err) {
2d9b49dd
BP
3345 xlate_report(ctx, OFT_DETAIL,
3346 "neighbor cache miss for %s on bridge %s, "
c2b878e0
TLSC
3347 "sending %s request",
3348 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3349 if (d_ip) {
3350 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3351 } else {
3352 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3353 }
a36de779
PS
3354 return err;
3355 }
c2b878e0 3356
a36de779
PS
3357 if (ctx->xin->xcache) {
3358 struct xc_entry *entry;
3359
53902038 3360 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
901a517e
JR
3361 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3362 sizeof entry->tnl_neigh_cache.br_name);
3363 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
a36de779 3364 }
81de18ec 3365
2d9b49dd 3366 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
c2b878e0
TLSC
3367 " to "ETH_ADDR_FMT" %s",
3368 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3369 ETH_ADDR_ARGS(dmac), buf_dip6);
3370
4975aa3e
PS
3371 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3372 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
a36de779
PS
3373 if (err) {
3374 return err;
3375 }
81765c00
BP
3376 tnl_push_data.tnl_port = tunnel_odp_port;
3377 tnl_push_data.out_port = out_dev->odp_port;
beb75a40 3378
7c12dfc5
SC
3379 /* After the tunnel header has been added, the MAC and IP data of 'flow'
3380 * and 'base_flow' need to be set properly, since there is no longer any
3381 * recirculation when sending the packet to the tunnel. */
beb75a40 3382
11938578
AZ
3383 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
3384 s_ip, tnl_params.is_ipv6,
3385 tnl_push_data.tnl_type);
7c12dfc5 3386
11938578
AZ
3387 size_t clone_ofs = 0;
3388 size_t push_action_size;
7c12dfc5 3389
11938578
AZ
3390 clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
3391 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3392 push_action_size = ctx->odp_actions->size;
3393
3394 if (!truncate) {
3395 const struct dpif_flow_stats *backup_resubmit_stats;
3396 struct xlate_cache *backup_xcache;
3397 struct flow_wildcards *backup_wc, wc;
3398 bool backup_side_effects;
3399 const struct dp_packet *backup_packet;
3400
3401 memset(&wc, 0, sizeof wc);
3402 backup_wc = ctx->wc;
3403 ctx->wc = &wc;
3404 ctx->xin->wc = NULL;
3405 backup_resubmit_stats = ctx->xin->resubmit_stats;
3406 backup_xcache = ctx->xin->xcache;
3407 backup_side_effects = ctx->xin->allow_side_effects;
3408 backup_packet = ctx->xin->packet;
3409
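/* Translate the post-tunnel-push pipeline with side effects disabled and a
 * temporary xcache, so that stats are not credited twice; the collected
 * entries feed the one-time stats push below. */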
3410 ctx->xin->resubmit_stats = NULL;
3411 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3412 ctx->xin->allow_side_effects = false;
3413 ctx->xin->packet = NULL;
3414
3415 /* Push the cache entry for the tunnel first. */
3416 struct xc_entry *entry;
3417 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3418 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3419 entry->tunnel_hdr.operation = ADD;
3420
3421 patch_port_output(ctx, xport, out_dev);
3422
3423 /* Similar to the stats update in revalidation, the xcache entries
3424 * populated by the previous translation are used to update the
3425 * stats correctly.
7c12dfc5 3426 */
11938578
AZ
3427 if (backup_resubmit_stats) {
3428 struct dpif_flow_stats stats = *backup_resubmit_stats;
3429 xlate_push_stats(ctx->xin->xcache, &stats);
3430 }
3431 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3432
3433 if (ctx->odp_actions->size > push_action_size) {
3434 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
3435 } else {
3436 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3437 /* XXX: There is no real use case for a tunnel push without
3438 * any post actions, but it is kept as is for now to keep
3439 * 'make check' happy. This should be removed once all of the
3440 * 'make check' tunnel test cases do something meaningful with
3441 * tunnel-encapsulated packets.
3442 */
3443 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3444 }
3445
3446 /* Restore context status. */
3447 ctx->xin->resubmit_stats = backup_resubmit_stats;
3448 xlate_cache_delete(ctx->xin->xcache);
3449 ctx->xin->xcache = backup_xcache;
3450 ctx->xin->allow_side_effects = backup_side_effects;
3451 ctx->xin->packet = backup_packet;
3452 ctx->wc = backup_wc;
3453 } else {
3454 /* In order to maintain accurate stats, use recirc for
3455 * native tunneling. */
7c12dfc5
SC
3456 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3457 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3458 }
11938578 3459
7c12dfc5
SC
3460 /* Restore the flows after the translation. */
3461 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3462 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
245d88bc
ZB
3463
3464 /* Restore sFlow data. */
3465 ctx->sflow_n_outputs = sflow_n_outputs;
3466
a36de779
PS
3467 return 0;
3468}
3469
704bb0bf
JS
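/* Emits ODP actions for the accumulated flow updates, i.e. the differences
 * between 'ctx->xin->flow' and 'ctx->base_flow', including any pending packet
 * encapsulation, and marks the flow for the slow path if necessary. */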
3470static void
3471xlate_commit_actions(struct xlate_ctx *ctx)
3472{
3473 bool use_masked = ctx->xbridge->support.masked_set_action;
3474
3475 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3476 ctx->odp_actions, ctx->wc,
1fc11c59
JS
3477 use_masked, ctx->pending_encap,
3478 ctx->encap_data);
f839892a 3479 ctx->pending_encap = false;
1fc11c59
JS
3480 ofpbuf_delete(ctx->encap_data);
3481 ctx->encap_data = NULL;
704bb0bf
JS
3482}
3483
07659514 3484static void
72fe7578 3485clear_conntrack(struct xlate_ctx *ctx)
07659514 3486{
72fe7578 3487 ctx->conntracked = false;
6846e91e 3488 flow_clear_conntrack(&ctx->xin->flow);
07659514
JS
3489}
3490
58d636ee
BK
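/* Returns true if both the port that 'flow' came in on and 'xport_out' belong
 * to protected bundles, in which case traffic between the two ports must not
 * be forwarded. */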
3491static bool
3492xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3493{
3494 const struct xport *xport_in;
3495
3496 if (!xport_out) {
3497 return false;
3498 }
3499
3500 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3501
3502 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3503 xport_in->xbundle->protected && xport_out->xbundle->protected);
3504}
3505
48f704f4 3506/* Handles a packet that is sent from one bridge to another bridge.
8bdb2bdb 3507 *
48f704f4
AZ
3508 * The bridges are internally connected, either with patch ports or with
3509 * tunnel ports.
3510 *
3511 * The output action to another bridge causes translation to continue within
3512 * the next bridge. This process can be recursive; the next bridge can
3513 * output to yet another bridge.
3514 *
3515 * The translated actions from the second bridge onwards are enclosed within
3516 * the clone action, so that any modification to the packet will not be visible
3517 * to the remaining actions of the originating bridge.
8bdb2bdb
SC
3518 */
3519static void
48f704f4
AZ
3520patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
3521 struct xport *out_dev)
8bdb2bdb
SC
3522{
3523 struct flow *flow = &ctx->xin->flow;
3524 struct flow old_flow = ctx->xin->flow;
3525 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3526 bool old_conntrack = ctx->conntracked;
3527 bool old_was_mpls = ctx->was_mpls;
3528 ovs_version_t old_version = ctx->xin->tables_version;
3529 struct ofpbuf old_stack = ctx->stack;
3530 uint8_t new_stack[1024];
3531 struct ofpbuf old_action_set = ctx->action_set;
3532 struct ovs_list *old_trace = ctx->xin->trace;
3533 uint64_t actset_stub[1024 / 8];
3534
3535 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3536 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3537 flow->in_port.ofp_port = out_dev->ofp_port;
3538 flow->metadata = htonll(0);
3539 memset(&flow->tunnel, 0, sizeof flow->tunnel);
86bfb29a 3540 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
8bdb2bdb
SC
3541 flow->tunnel.metadata.tab =
3542 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3543 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3544 memset(flow->regs, 0, sizeof flow->regs);
3545 flow->actset_output = OFPP_UNSET;
3546 clear_conntrack(ctx);
3547 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3548 out_dev->xbridge->name);
3549 mirror_mask_t old_mirrors = ctx->mirrors;
3550 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3551 if (independent_mirrors) {
3552 ctx->mirrors = 0;
3553 }
3554 ctx->xbridge = out_dev->xbridge;
3555
3556 /* The bridge is now known so obtain its table version. */
3557 ctx->xin->tables_version
3558 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3559
3560 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3561 if (xport_stp_forward_state(out_dev) &&
3562 xport_rstp_forward_state(out_dev)) {
3563 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
96c3a6e5 3564 false, true, clone_xlate_actions);
8bdb2bdb
SC
3565 if (!ctx->freezing) {
3566 xlate_action_set(ctx);
3567 }
3568 if (ctx->freezing) {
3569 finish_freezing(ctx);
3570 }
3571 } else {
3572 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3573 * the learning action look at the packet, then drop it. */
3574 struct flow old_base_flow = ctx->base_flow;
3575 size_t old_size = ctx->odp_actions->size;
3576 mirror_mask_t old_mirrors2 = ctx->mirrors;
3577
3578 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
96c3a6e5 3579 false, true, clone_xlate_actions);
8bdb2bdb
SC
3580 ctx->mirrors = old_mirrors2;
3581 ctx->base_flow = old_base_flow;
3582 ctx->odp_actions->size = old_size;
3583
3584 /* Undo changes that may have been done for freezing. */
3585 ctx_cancel_freeze(ctx);
3586 }
3587 }
3588
3589 ctx->xin->trace = old_trace;
3590 if (independent_mirrors) {
3591 ctx->mirrors = old_mirrors;
3592 }
3593 ctx->xin->flow = old_flow;
3594 ctx->xbridge = in_dev->xbridge;
3595 ofpbuf_uninit(&ctx->action_set);
3596 ctx->action_set = old_action_set;
3597 ofpbuf_uninit(&ctx->stack);
3598 ctx->stack = old_stack;
3599
3600 /* Restore calling bridge's lookup version. */
3601 ctx->xin->tables_version = old_version;
3602
3603 /* Restore the calling bridge's tunneling information. */
3604 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3605
3606 /* The out bridge popping MPLS should have no effect on the original
3607 * bridge. */
3608 ctx->was_mpls = old_was_mpls;
3609
3610 /* The out bridge's conntrack execution should have no effect on the
3611 * original bridge. */
3612 ctx->conntracked = old_conntrack;
3613
3614 /* The fact that the out bridge exits (for any reason) does not mean
3615 * that the original bridge should exit. Specifically, if the out
3616 * bridge freezes translation, the original bridge must continue
3617 * processing with the original, not the frozen packet! */
3618 ctx->exit = false;
3619
3620 /* Out bridge errors do not propagate back. */
3621 ctx->error = XLATE_OK;
3622
3623 if (ctx->xin->resubmit_stats) {
3624 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3625 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3626 if (out_dev->bfd) {
3627 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3628 }
3629 }
3630 if (ctx->xin->xcache) {
3631 struct xc_entry *entry;
3632
3633 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3634 entry->dev.tx = netdev_ref(in_dev->netdev);
3635 entry->dev.rx = netdev_ref(out_dev->netdev);
3636 entry->dev.bfd = bfd_ref(out_dev->bfd);
3637 }
3638}
3639
081617f0
JS
3640static bool
3641check_output_prerequisites(struct xlate_ctx *ctx,
3642 const struct xport *xport,
3643 struct flow *flow,
3644 bool check_stp)
9583bc14 3645{
49a73e0c 3646 struct flow_wildcards *wc = ctx->wc;
9583bc14 3647
46c88433 3648 if (!xport) {
2d9b49dd 3649 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
081617f0 3650 return false;
46c88433 3651 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
2d9b49dd 3652 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
081617f0 3653 return false;
1356dbd1 3654 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
2d9b49dd
BP
3655 xlate_report(ctx, OFT_WARN,
3656 "Mirror truncate to ODPP_NONE, skipping output");
081617f0 3657 return false;
58d636ee 3658 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
2d9b49dd
BP
3659 xlate_report(ctx, OFT_WARN,
3660 "Flow is between protected ports, skipping output.");
081617f0 3661 return false;
0d1cee12 3662 } else if (check_stp) {
bbbca389 3663 if (is_stp(&ctx->base_flow)) {
9efd308e
DV
3664 if (!xport_stp_should_forward_bpdu(xport) &&
3665 !xport_rstp_should_manage_bpdu(xport)) {
3666 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3667 xlate_report(ctx, OFT_WARN,
3668 "STP not in listening state, "
3669 "skipping bpdu output");
9efd308e 3670 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3671 xlate_report(ctx, OFT_WARN,
3672 "RSTP not managing BPDU in this state, "
3673 "skipping bpdu output");
9efd308e 3674 }
081617f0 3675 return false;
0d1cee12 3676 }
67818616
MV
3677 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3678 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3679 wc))) {
3680 /* Pass; STP should not block link health detection. */
9efd308e
DV
3681 } else if (!xport_stp_forward_state(xport) ||
3682 !xport_rstp_forward_state(xport)) {
3683 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3684 xlate_report(ctx, OFT_WARN,
3685 "STP not in forwarding state, skipping output");
9efd308e 3686 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3687 xlate_report(ctx, OFT_WARN,
3688 "RSTP not in forwarding state, skipping output");
9efd308e 3689 }
081617f0 3690 return false;
0d1cee12 3691 }
9583bc14 3692 }
5dbfe239
ZB
3693
3694 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3695 flow->packet_type != htonl(PT_ETH)) {
3696 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
3697 "through legacy L2 port. Dropping packet.");
3698 return false;
3699 }
3700
081617f0
JS
3701 return true;
3702}
3703
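/* Determines whether a packet output to OFPP_LOCAL on a bridge with native
 * tunneling enabled should instead be intercepted as traffic arriving on a
 * tunnel. Stores the matched tunnel's datapath port in '*tnl_port' (ODPP_NONE
 * if there is no match) and returns true if a tunnel port matched. */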
3704static bool
3705terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3706 struct flow *flow, struct flow_wildcards *wc,
3707 odp_port_t *tnl_port)
3708{
3709 *tnl_port = ODPP_NONE;
3710
3711 /* XXX: Write a better filter for the tunnel port. We can use in_port
3712 * in the tunnel-port flow to avoid these checks completely. */
3713 if (ofp_port == OFPP_LOCAL &&
3714 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3715 *tnl_port = tnl_port_map_lookup(flow, wc);
3716 }
3717
3718 return *tnl_port != ODPP_NONE;
3719}
3720
3721static void
3722compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
feee58b9 3723 const struct xlate_bond_recirc *xr, bool check_stp,
11938578 3724 bool is_last_action OVS_UNUSED, bool truncate)
081617f0
JS
3725{
3726 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
3727 struct flow_wildcards *wc = ctx->wc;
3728 struct flow *flow = &ctx->xin->flow;
3729 struct flow_tnl flow_tnl;
3730 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
3731 uint8_t flow_nw_tos;
3732 odp_port_t out_port, odp_port, odp_tnl_port;
3733 bool is_native_tunnel = false;
3734 uint8_t dscp;
5dbfe239
ZB
3735 struct eth_addr flow_dl_dst = flow->dl_dst;
3736 struct eth_addr flow_dl_src = flow->dl_src;
3737 ovs_be32 flow_packet_type = flow->packet_type;
3738 ovs_be16 flow_dl_type = flow->dl_type;
081617f0
JS
3739
3740 /* If 'struct flow' gets additional metadata, we'll need to zero it out
3741 * before traversing a patch port. */
3d2fbd70 3742 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 40);
081617f0
JS
3743 memset(&flow_tnl, 0, sizeof flow_tnl);
3744
3745 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
3746 return;
3747 }
9583bc14 3748
875ab130
BP
3749 if (flow->packet_type == htonl(PT_ETH)) {
3750 /* Strip Ethernet header for legacy L3 port. */
3751 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
3752 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
3753 ntohs(flow->dl_type));
3754 }
beb75a40
JS
3755 }
3756
46c88433 3757 if (xport->peer) {
5cb92182
BP
3758 if (truncate) {
3759 xlate_report_error(ctx, "Cannot truncate output to patch port");
3760 }
48f704f4 3761 patch_port_output(ctx, xport, xport->peer);
8bdb2bdb 3762 return;
9583bc14
EJ
3763 }
3764
f0fb825a 3765 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
33bf9176 3766 flow_nw_tos = flow->nw_tos;
9583bc14 3767
16194afd
DDP
3768 if (count_skb_priorities(xport)) {
3769 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3770 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
3771 wc->masks.nw_tos |= IP_DSCP_MASK;
3772 flow->nw_tos &= ~IP_DSCP_MASK;
3773 flow->nw_tos |= dscp;
3774 }
9583bc14
EJ
3775 }
3776
46c88433 3777 if (xport->is_tunnel) {
c2b878e0 3778 struct in6_addr dst;
9583bc14
EJ
3779 /* Save tunnel metadata so that changes made due to
3780 * the Logical (tunnel) Port are not visible for any further
3781 * matches, while explicit set actions on tunnel metadata are.
3782 */
a36de779 3783 flow_tnl = flow->tunnel;
49a73e0c 3784 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
4e022ec0 3785 if (odp_port == ODPP_NONE) {
2d9b49dd 3786 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
9583bc14
EJ
3787 goto out; /* restore flow_nw_tos */
3788 }
c2b878e0
TLSC
3789 dst = flow_tnl_dst(&flow->tunnel);
3790 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
2d9b49dd 3791 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
9583bc14
EJ
3792 goto out; /* restore flow_nw_tos */
3793 }
3794 if (ctx->xin->resubmit_stats) {
46c88433 3795 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
9583bc14 3796 }
b256dc52
JS
3797 if (ctx->xin->xcache) {
3798 struct xc_entry *entry;
3799
3800 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
901a517e 3801 entry->dev.tx = netdev_ref(xport->netdev);
b256dc52 3802 }
9583bc14 3803 out_port = odp_port;
a36de779 3804 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
2d9b49dd 3805 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
081617f0 3806 is_native_tunnel = true;
a36de779 3807 } else {
2d9b49dd 3808 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
1520ef4f 3809 commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
a36de779
PS
3810 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3811 }
9583bc14 3812 } else {
46c88433 3813 odp_port = xport->odp_port;
7614e5d0 3814 out_port = odp_port;
9583bc14 3815 }
9583bc14 3816
4e022ec0 3817 if (out_port != ODPP_NONE) {
081617f0 3818 /* Commit accumulated flow updates before output. */
704bb0bf 3819 xlate_commit_actions(ctx);
adcf00ba 3820
e93ef1c7 3821 if (xr) {
081617f0 3822 /* Recirculate the packet. */
347bf289 3823 struct ovs_action_hash *act_hash;
adcf00ba 3824
347bf289 3825 /* Hash action. */
1520ef4f 3826 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
347bf289
AZ
3827 OVS_ACTION_ATTR_HASH,
3828 sizeof *act_hash);
3829 act_hash->hash_alg = xr->hash_alg;
62ac1f20 3830 act_hash->hash_basis = xr->hash_basis;
347bf289
AZ
3831
3832 /* Recirc action. */
1520ef4f 3833 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
347bf289 3834 xr->recirc_id);
081617f0
JS
3835 } else if (is_native_tunnel) {
3836 /* Output to native tunnel port. */
11938578 3837 native_tunnel_output(ctx, xport, flow, odp_port, truncate);
081617f0 3838 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
a36de779 3839
081617f0
JS
3840 } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
3841 &odp_tnl_port)) {
3842 /* Intercept packet to be received on native tunnel port. */
3843 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
3844 odp_tnl_port);
a36de779 3845
081617f0
JS
3846 } else {
3847 /* Tunnel push-pop action is not compatible with
3848 * IPFIX action. */
3849 compose_ipfix_action(ctx, out_port);
3850
3851 /* Handle truncation of the mirrored packet. */
3852 if (ctx->mirror_snaplen > 0 &&
3853 ctx->mirror_snaplen < UINT16_MAX) {
3854 struct ovs_action_trunc *trunc;
3855
3856 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
3857 OVS_ACTION_ATTR_TRUNC,
3858 sizeof *trunc);
3859 trunc->max_len = ctx->mirror_snaplen;
3860 if (!ctx->xbridge->support.trunc) {
3861 ctx->xout->slow |= SLOW_ACTION;
1356dbd1
WT
3862 }
3863 }
081617f0
JS
3864
3865 nl_msg_put_odp_port(ctx->odp_actions,
3866 OVS_ACTION_ATTR_OUTPUT,
3867 out_port);
adcf00ba 3868 }
9583bc14 3869
6cbbf4fa
EJ
3870 ctx->sflow_odp_port = odp_port;
3871 ctx->sflow_n_outputs++;
2031ef97 3872 ctx->nf_output_iface = ofp_port;
6cbbf4fa
EJ
3873 }
3874
7efbc3b7
BP
3875 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
3876 mirror_packet(ctx, xport->xbundle,
3877 xbundle_mirror_dst(xport->xbundle->xbridge,
3878 xport->xbundle));
3879 }
3880
6cbbf4fa 3881 out:
9583bc14 3882 /* Restore flow */
f0fb825a 3883 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
33bf9176 3884 flow->nw_tos = flow_nw_tos;
5dbfe239
ZB
3885 flow->dl_dst = flow_dl_dst;
3886 flow->dl_src = flow_dl_src;
3887 flow->packet_type = flow_packet_type;
3888 flow->dl_type = flow_dl_type;
9583bc14
EJ
3889}
3890
3891static void
e93ef1c7 3892compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
feee58b9 3893 const struct xlate_bond_recirc *xr,
11938578 3894 bool is_last_action, bool truncate)
9583bc14 3895{
11938578
AZ
3896 compose_output_action__(ctx, ofp_port, xr, true,
3897 is_last_action, truncate);
9583bc14
EJ
3898}
3899
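/* Translates the actions of 'rule' through 'actions_xlator', crediting the
 * rule's stats and temporarily making 'rule' the current rule; 'deepens' is
 * true when this resubmit counts against the translation depth limit. */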
bb61b33d 3900static void
feee58b9 3901xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
96c3a6e5
AZ
3902 bool deepens, bool is_last_action,
3903 xlate_actions_handler *actions_xlator)
bb61b33d
BP
3904{
3905 struct rule_dpif *old_rule = ctx->rule;
8b1e5560 3906 ovs_be64 old_cookie = ctx->rule_cookie;
dc723c44 3907 const struct rule_actions *actions;
bb61b33d
BP
3908
3909 if (ctx->xin->resubmit_stats) {
70742c7f 3910 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
bb61b33d
BP
3911 }
3912
98b07853 3913 ctx->resubmits++;
790c5d26 3914
790c5d26 3915 ctx->depth += deepens;
bb61b33d 3916 ctx->rule = rule;
07a3cd5c
BP
3917 ctx->rule_cookie = rule->up.flow_cookie;
3918 actions = rule_get_actions(&rule->up);
96c3a6e5
AZ
3919 actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
3920 is_last_action);
8b1e5560 3921 ctx->rule_cookie = old_cookie;
bb61b33d 3922 ctx->rule = old_rule;
790c5d26 3923 ctx->depth -= deepens;
bb61b33d
BP
3924}
3925
bd3240ba
SH
3926static bool
3927xlate_resubmit_resource_check(struct xlate_ctx *ctx)
9583bc14 3928{
790c5d26 3929 if (ctx->depth >= MAX_DEPTH) {
2d9b49dd 3930 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
fff1b9c0 3931 ctx->error = XLATE_RECURSION_TOO_DEEP;
790c5d26 3932 } else if (ctx->resubmits >= MAX_RESUBMITS) {
2d9b49dd 3933 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
fff1b9c0 3934 ctx->error = XLATE_TOO_MANY_RESUBMITS;
1520ef4f 3935 } else if (ctx->odp_actions->size > UINT16_MAX) {
2d9b49dd 3936 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
fff1b9c0
JR
3937 /* NOT an error, as we'll be slow-pathing the flow in this case? */
3938 ctx->exit = true; /* XXX: translation still terminated! */
6fd6ed71 3939 } else if (ctx->stack.size >= 65536) {
2d9b49dd 3940 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
fff1b9c0 3941 ctx->error = XLATE_STACK_TOO_DEEP;
98b07853 3942 } else {
bd3240ba
SH
3943 return true;
3944 }
3945
3946 return false;
3947}
3948
2cd20955
JR
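/* Swaps the flow's 5-tuple (addresses, ports, and IP protocol) with the
 * conntrack original-direction 5-tuple held in the ct_* fields. Used by
 * resubmit with the "ct" option to perform the table lookup on the original
 * direction of the connection. */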
3949static void
3950tuple_swap_flow(struct flow *flow, bool ipv4)
3951{
3952 uint8_t nw_proto = flow->nw_proto;
3953 flow->nw_proto = flow->ct_nw_proto;
3954 flow->ct_nw_proto = nw_proto;
3955
3956 if (ipv4) {
3957 ovs_be32 nw_src = flow->nw_src;
3958 flow->nw_src = flow->ct_nw_src;
3959 flow->ct_nw_src = nw_src;
3960
3961 ovs_be32 nw_dst = flow->nw_dst;
3962 flow->nw_dst = flow->ct_nw_dst;
3963 flow->ct_nw_dst = nw_dst;
3964 } else {
3965 struct in6_addr ipv6_src = flow->ipv6_src;
3966 flow->ipv6_src = flow->ct_ipv6_src;
3967 flow->ct_ipv6_src = ipv6_src;
3968
3969 struct in6_addr ipv6_dst = flow->ipv6_dst;
3970 flow->ipv6_dst = flow->ct_ipv6_dst;
3971 flow->ct_ipv6_dst = ipv6_dst;
3972 }
3973
3974 ovs_be16 tp_src = flow->tp_src;
3975 flow->tp_src = flow->ct_tp_src;
3976 flow->ct_tp_src = tp_src;
3977
3978 ovs_be16 tp_dst = flow->tp_dst;
3979 flow->tp_dst = flow->ct_tp_dst;
3980 flow->ct_tp_dst = tp_dst;
3981}
3982
3983static void
3984tuple_swap(struct flow *flow, struct flow_wildcards *wc)
3985{
3986 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
3987
3988 tuple_swap_flow(flow, ipv4);
3989 tuple_swap_flow(&wc->masks, ipv4);
3990}
3991
bd3240ba 3992static void
6d328fa2 3993xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
2cd20955 3994 bool may_packet_in, bool honor_table_miss,
96c3a6e5
AZ
3995 bool with_ct_orig, bool is_last_action,
3996 xlate_actions_handler *xlator)
bd3240ba 3997{
e12ec36b
SH
3998 /* Check if we need to recirculate before matching in a table. */
3999 if (ctx->was_mpls) {
4000 ctx_trigger_freeze(ctx);
4001 return;
4002 }
bd3240ba 4003 if (xlate_resubmit_resource_check(ctx)) {
9583bc14 4004 uint8_t old_table_id = ctx->table_id;
3f207910 4005 struct rule_dpif *rule;
9583bc14
EJ
4006
4007 ctx->table_id = table_id;
4008
2cd20955
JR
4009 /* Swap packet fields with CT 5-tuple if requested. */
4010 if (with_ct_orig) {
4011 /* Do not swap if there is no CT tuple, or if key is not IP. */
4012 if (ctx->xin->flow.ct_nw_proto == 0 ||
4013 !is_ip_any(&ctx->xin->flow)) {
4014 xlate_report_error(ctx,
4015 "resubmit(ct) with non-tracked or non-IP packet!");
4016 return;
4017 }
4018 tuple_swap(&ctx->xin->flow, ctx->wc);
4019 }
34dd0d78 4020 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
1f4a8933 4021 ctx->xin->tables_version,
c0e638aa 4022 &ctx->xin->flow, ctx->wc,
34dd0d78
JR
4023 ctx->xin->resubmit_stats,
4024 &ctx->table_id, in_port,
a027899e
JR
4025 may_packet_in, honor_table_miss,
4026 ctx->xin->xcache);
2cd20955
JR
4027 /* Swap back. */
4028 if (with_ct_orig) {
4029 tuple_swap(&ctx->xin->flow, ctx->wc);
4030 }
ad3efdcb 4031
a2143702 4032 if (rule) {
83709dfa
JR
4033 /* Fill in the cache entry here instead of xlate_recursively
4034 * to make the reference counting more explicit. We take a
4035 * reference in the lookups above if we are going to cache the
4036 * rule. */
4037 if (ctx->xin->xcache) {
4038 struct xc_entry *entry;
4039
4040 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
901a517e 4041 entry->rule = rule;
07a3cd5c 4042 ofproto_rule_ref(&rule->up);
83709dfa 4043 }
2d9b49dd
BP
4044
4045 struct ovs_list *old_trace = ctx->xin->trace;
4046 xlate_report_table(ctx, rule, table_id);
feee58b9 4047 xlate_recursively(ctx, rule, table_id <= old_table_id,
96c3a6e5 4048 is_last_action, xlator);
2d9b49dd 4049 ctx->xin->trace = old_trace;
ad3efdcb
EJ
4050 }
4051
9583bc14 4052 ctx->table_id = old_table_id;
98b07853 4053 return;
9583bc14
EJ
4054 }
4055}
4056
76973237 4057/* Consumes the group reference, which is only taken if xcache exists. */
f4fb341b 4058static void
1e684d7d
RW
4059xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4060 struct ofputil_bucket *bucket)
4061{
4062 if (ctx->xin->resubmit_stats) {
4063 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4064 }
4065 if (ctx->xin->xcache) {
4066 struct xc_entry *entry;
4067
4068 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
901a517e
JR
4069 entry->group.group = group;
4070 entry->group.bucket = bucket;
1e684d7d
RW
4071 }
4072}
4073
4074static void
feee58b9
AZ
4075xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
4076 bool is_last_action)
f4fb341b
SH
4077{
4078 uint64_t action_list_stub[1024 / 8];
0a2869d5
BP
4079 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4080 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4081 bucket->ofpacts_len);
5b09e569 4082 struct flow old_flow = ctx->xin->flow;
e12ec36b 4083 bool old_was_mpls = ctx->was_mpls;
f4fb341b 4084
f4fb341b 4085 ofpacts_execute_action_set(&action_list, &action_set);
790c5d26 4086 ctx->depth++;
feee58b9 4087 do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action);
790c5d26 4088 ctx->depth--;
f4fb341b 4089
f4fb341b 4090 ofpbuf_uninit(&action_list);
5b09e569 4091
77ab5fd2 4092 /* Check if need to freeze. */
1d361a81 4093 if (ctx->freezing) {
77ab5fd2 4094 finish_freezing(ctx);
e672ff9b
JR
4095 }
4096
5b09e569
JR
4097 /* Roll back flow to previous state.
4098 * This is equivalent to cloning the packet for each bucket.
4099 *
4100 * As a side effect any subsequently applied actions will
4101 * also effectively be applied to a clone of the packet taken
4102 * just before applying the all or indirect group.
4103 *
4104 * Note that group buckets are action sets, hence they cannot modify the
4105 * main action set. Also any stack actions are ignored when executing an
4106 * action set, so group buckets cannot change the stack either.
4107 * However, we do allow resubmit actions in group buckets, which could
4108 * break the above assumptions. It is up to the controller to not mess up
4109 * with the action_set and stack in the tables resubmitted to from
4110 * group buckets. */
4111 ctx->xin->flow = old_flow;
4112
e12ec36b
SH
4113 /* The group bucket popping MPLS should have no effect after bucket
4114 * execution. */
4115 ctx->was_mpls = old_was_mpls;
4116
5b09e569
JR
4117 /* The fact that the group bucket exits (for any reason) does not mean that
4118 * the translation after the group action should exit. Specifically, if
1d361a81
BP
4119 * the group bucket freezes translation, the actions after the group action
4120 * must continue processing with the original, not the frozen packet! */
5b09e569 4121 ctx->exit = false;
b99654b0
VDA
4122
4123 /* Context error in a bucket should not impact processing of other buckets
4124 * or actions. This is similar to cloning a packet for group buckets.
4125 * There is no need to restore the error back to old value due to the fact
4126 * that we actually processed group action which can happen only when there
4127 * is no previous context error.
4128 *
4129 * Exception to above is errors which are system limits to protect
4130 * translation from running too long or occupy too much space. These errors
4131 * should not be masked. XLATE_RECURSION_TOO_DEEP, XLATE_TOO_MANY_RESUBMITS
4132 * and XLATE_STACK_TOO_DEEP fall in this category. */
4133 if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
4134 ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
4135 /* reset the error and continue processing other buckets */
4136 ctx->error = XLATE_OK;
4137 }
f4fb341b
SH
4138}
4139
4140static void
feee58b9
AZ
4141xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group,
4142 bool is_last_action)
f4fb341b 4143{
1e684d7d 4144 struct ofputil_bucket *bucket;
07a3cd5c 4145 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
feee58b9
AZ
4146 bool last = is_last_action && !bucket->list_node.next;
4147 xlate_group_bucket(ctx, bucket, last);
f4fb341b 4148 }
1e684d7d 4149 xlate_group_stats(ctx, group, NULL);
f4fb341b
SH
4150}
4151
dd8cd4b4 4152static void
feee58b9
AZ
4153xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group,
4154 bool is_last_action)
dd8cd4b4 4155{
1e684d7d 4156 struct ofputil_bucket *bucket;
dd8cd4b4
SH
4157
4158 bucket = group_first_live_bucket(ctx, group, 0);
4159 if (bucket) {
feee58b9 4160 xlate_group_bucket(ctx, bucket, is_last_action);
1e684d7d 4161 xlate_group_stats(ctx, group, bucket);
76973237 4162 } else if (ctx->xin->xcache) {
07a3cd5c 4163 ofproto_group_unref(&group->up);
dd8cd4b4
SH
4164 }
4165}
4166
fe7e5749 4167static void
feee58b9
AZ
4168xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4169 bool is_last_action)
fe7e5749 4170{
49a73e0c 4171 struct flow_wildcards *wc = ctx->wc;
1e684d7d 4172 struct ofputil_bucket *bucket;
fe7e5749
SH
4173 uint32_t basis;
4174
1d1aae0b 4175 basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
80e3509d 4176 flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
fe7e5749
SH
4177 bucket = group_best_live_bucket(ctx, group, basis);
4178 if (bucket) {
feee58b9 4179 xlate_group_bucket(ctx, bucket, is_last_action);
1e684d7d 4180 xlate_group_stats(ctx, group, bucket);
76973237 4181 } else if (ctx->xin->xcache) {
07a3cd5c 4182 ofproto_group_unref(&group->up);
fe7e5749
SH
4183 }
4184}
4185
0c4b9393 4186static void
feee58b9
AZ
4187xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4188 bool is_last_action)
0c4b9393 4189{
07a3cd5c
BP
4190 const struct field_array *fields = &group->up.props.fields;
4191 const uint8_t *mask_values = fields->values;
4192 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
0c4b9393 4193
07a3cd5c 4194 size_t i;
e8dba719
JR
4195 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4196 const struct mf_field *mf = mf_from_id(i);
0c4b9393 4197
5bcd4754 4198 /* Skip fields for which prerequisites are not met. */
e8dba719
JR
4199 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4200 /* Skip the mask bytes for this field. */
4201 mask_values += mf->n_bytes;
4202 continue;
4203 }
0c4b9393 4204
e8dba719
JR
4205 union mf_value value;
4206 union mf_value mask;
0c4b9393 4207
e8dba719
JR
4208 mf_get_value(mf, &ctx->xin->flow, &value);
4209 /* Mask the value. */
4210 for (int j = 0; j < mf->n_bytes; j++) {
4211 mask.b[j] = *mask_values++;
4212 value.b[j] &= mask.b[j];
4213 }
4214 basis = hash_bytes(&value, mf->n_bytes, basis);
1cb20095 4215
e8dba719
JR
4216 /* For tunnels, hash in whether the field is present. */
4217 if (mf_is_tun_metadata(mf)) {
4218 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
0c4b9393 4219 }
e8dba719
JR
4220
4221 mf_mask_field_masked(mf, &mask, ctx->wc);
0c4b9393
SH
4222 }
4223
07a3cd5c 4224 struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
0c4b9393 4225 if (bucket) {
feee58b9 4226 xlate_group_bucket(ctx, bucket, is_last_action);
0c4b9393 4227 xlate_group_stats(ctx, group, bucket);
76973237 4228 } else if (ctx->xin->xcache) {
07a3cd5c 4229 ofproto_group_unref(&group->up);
0c4b9393
SH
4230 }
4231}
4232
53cc166a 4233static void
feee58b9
AZ
4234xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4235 bool is_last_action)
53cc166a
JR
4236{
4237 struct ofputil_bucket *bucket;
4238
4239 /* dp_hash value 0 is special since it means that the dp_hash has not been
4240 * computed, as all computed dp_hash values are non-zero. Therefore, a
4241 * comparison with zero can be used to decide if the dp_hash value is valid
4242 * without masking the dp_hash field. */
4243 if (!ctx->xin->flow.dp_hash) {
07a3cd5c 4244 uint64_t param = group->up.props.selection_method_param;
53cc166a
JR
4245
4246 ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
4247 } else {
07a3cd5c 4248 uint32_t n_buckets = group->up.n_buckets;
53cc166a
JR
4249 if (n_buckets) {
4250 /* Minimal mask to cover the number of buckets. */
4251 uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
4252 /* Multiplier chosen so that the trivial 1-bit case actually
4253 * distributes amongst two equal-weight buckets. */
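/* For example, with n_buckets == 3, log_2_ceil(3) == 2 gives mask == 0x3,
 * so dp_hash is folded into the range [0,3] before being multiplied into
 * the basis used for bucket selection. */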
4254 uint32_t basis = 0xc2b73583 * (ctx->xin->flow.dp_hash & mask);
4255
4256 ctx->wc->masks.dp_hash |= mask;
4257 bucket = group_best_live_bucket(ctx, group, basis);
4258 if (bucket) {
feee58b9 4259 xlate_group_bucket(ctx, bucket, is_last_action);
53cc166a
JR
4260 xlate_group_stats(ctx, group, bucket);
4261 }
4262 }
4263 }
4264}
4265
7565c3e4 4266static void
feee58b9
AZ
4267xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4268 bool is_last_action)
7565c3e4 4269{
07a3cd5c 4270 const char *selection_method = group->up.props.selection_method;
7565c3e4 4271
e12ec36b
SH
4272 /* Select groups may access flow keys beyond L2 in order to
4273 * select a bucket. Recirculate as appropriate to make this possible.
4274 */
4275 if (ctx->was_mpls) {
4276 ctx_trigger_freeze(ctx);
4277 }
4278
7565c3e4 4279 if (selection_method[0] == '\0') {
feee58b9 4280 xlate_default_select_group(ctx, group, is_last_action);
0c4b9393 4281 } else if (!strcasecmp("hash", selection_method)) {
feee58b9 4282 xlate_hash_fields_select_group(ctx, group, is_last_action);
53cc166a 4283 } else if (!strcasecmp("dp_hash", selection_method)) {
feee58b9 4284 xlate_dp_hash_select_group(ctx, group, is_last_action);
7565c3e4
SH
4285 } else {
4286 /* Parsing of groups should ensure this never happens. */
4287 OVS_NOT_REACHED();
4288 }
4289}
4290
f4fb341b 4291static void
feee58b9
AZ
4292xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4293 bool is_last_action)
f4fb341b 4294{
0eb48fe1 4295 bool was_in_group = ctx->in_group;
5a070238
BP
4296 ctx->in_group = true;
4297
07a3cd5c 4298 switch (group->up.type) {
f4fb341b
SH
4299 case OFPGT11_ALL:
4300 case OFPGT11_INDIRECT:
feee58b9 4301 xlate_all_group(ctx, group, is_last_action);
f4fb341b
SH
4302 break;
4303 case OFPGT11_SELECT:
feee58b9 4304 xlate_select_group(ctx, group, is_last_action);
f4fb341b 4305 break;
dd8cd4b4 4306 case OFPGT11_FF:
feee58b9 4307 xlate_ff_group(ctx, group, is_last_action);
dd8cd4b4 4308 break;
f4fb341b 4309 default:
428b2edd 4310 OVS_NOT_REACHED();
f4fb341b 4311 }
5a070238 4312
0eb48fe1 4313 ctx->in_group = was_in_group;
f4fb341b
SH
4314}
4315
4316static bool
feee58b9
AZ
4317xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4318 bool is_last_action)
f4fb341b 4319{
0eb48fe1 4320 if (xlate_resubmit_resource_check(ctx)) {
f4fb341b 4321 struct group_dpif *group;
f4fb341b 4322
76973237
JR
4323 /* Take ref only if xcache exists. */
4324 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 4325 ctx->xin->tables_version, ctx->xin->xcache);
db88b35c
JR
4326 if (!group) {
4327 /* XXX: Should set ctx->error ? */
2d9b49dd
BP
4328 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4329 group_id);
f4fb341b
SH
4330 return true;
4331 }
feee58b9 4332 xlate_group_action__(ctx, group, is_last_action);
f4fb341b
SH
4333 }
4334
4335 return false;
4336}
4337
9583bc14
EJ
4338static void
4339xlate_ofpact_resubmit(struct xlate_ctx *ctx,
feee58b9
AZ
4340 const struct ofpact_resubmit *resubmit,
4341 bool is_last_action)
9583bc14 4342{
4e022ec0 4343 ofp_port_t in_port;
9583bc14 4344 uint8_t table_id;
adcf00ba
AZ
4345 bool may_packet_in = false;
4346 bool honor_table_miss = false;
4347
4348 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4349 /* Still allow missed packets to be sent to the controller
4350 * if resubmitting from an internal table. */
4351 may_packet_in = true;
4352 honor_table_miss = true;
4353 }
9583bc14
EJ
4354
4355 in_port = resubmit->in_port;
4356 if (in_port == OFPP_IN_PORT) {
4e022ec0 4357 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
4358 }
4359
4360 table_id = resubmit->table_id;
4361 if (table_id == 255) {
4362 table_id = ctx->table_id;
4363 }
4364
adcf00ba 4365 xlate_table_action(ctx, in_port, table_id, may_packet_in,
feee58b9 4366 honor_table_miss, resubmit->with_ct_orig,
96c3a6e5 4367 is_last_action, do_xlate_actions);
9583bc14
EJ
4368}
4369
4370static void
feee58b9
AZ
4371flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4372 bool all, bool is_last_action)
9583bc14 4373{
feee58b9
AZ
4374 if (!xport) {
4375 return;
4376 }
4377
4378 if (all) {
4379 compose_output_action__(ctx, xport->ofp_port, NULL, false,
11938578 4380 is_last_action, false);
feee58b9 4381 } else {
11938578
AZ
4382 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4383 false);
feee58b9
AZ
4384 }
4385}
4386
4387static void
4388flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4389{
4390 const struct xport *xport, *last = NULL;
9583bc14 4391
feee58b9 4392 /* Use 'last' to keep track of the last output port. */
46c88433
EJ
4393 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4394 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
4395 continue;
4396 }
4397
feee58b9
AZ
4398 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4399 /* The saved 'last' is not the last port after all; send a packet
4400 * out to it and update 'last'. */
4401 flood_packet_to_port(ctx, last, all, false);
4402 last = xport;
9583bc14
EJ
4403 }
4404 }
4405
feee58b9
AZ
4406 /* Send the packet to the 'last' port. */
4407 flood_packet_to_port(ctx, last, all, is_last_action);
2031ef97 4408 ctx->nf_output_iface = NF_OUT_FLOOD;
9583bc14
EJ
4409}
4410
74c4530d
JP
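/* Emits a userspace action whose controller cookie carries the metadata for
 * building a packet-in: 'reason', 'controller_id', a maximum length of 'len',
 * the current rule cookie, and 'recirc_id' for resuming a frozen pipeline
 * when 'continuation' is set. 'dont_send' marks the packet-in as one that
 * should not actually be sent to the controller. */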
4411static void
4412put_controller_user_action(struct xlate_ctx *ctx,
4413 bool dont_send, bool continuation,
4414 uint32_t recirc_id, int len,
4415 enum ofp_packet_in_reason reason,
4416 uint16_t controller_id)
4417{
4418 struct user_action_cookie cookie;
4419
4420 memset(&cookie, 0, sizeof cookie);
4421 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4422 cookie.ofp_in_port = OFPP_NONE;
4423 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4424 cookie.controller.dont_send = dont_send;
4425 cookie.controller.continuation = continuation;
4426 cookie.controller.reason = reason;
4427 cookie.controller.recirc_id = recirc_id;
4428 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4429 cookie.controller.controller_id = controller_id;
4430 cookie.controller.max_len = len;
4431
4432 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4433 ctx->xin->flow.in_port.ofp_port);
4434 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
4435 flow_hash_5tuple(&ctx->xin->flow, 0));
4436 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4437 false, ctx->odp_actions);
4438}
4439
9583bc14 4440static void
d39ec23d
JP
4441xlate_controller_action(struct xlate_ctx *ctx, int len,
4442 enum ofp_packet_in_reason reason,
4443 uint16_t controller_id,
4444 const uint8_t *userdata, size_t userdata_len)
9583bc14 4445{
b476e2f2 4446 xlate_commit_actions(ctx);
beb75a40 4447
9bfe9334
BP
4448 /* A packet sent by an action in a table-miss rule is considered an
4449 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4450 * it will get translated back to OFPR_ACTION for those versions. */
4451 if (reason == OFPR_ACTION
07a3cd5c 4452 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
9bfe9334
BP
4453 reason = OFPR_EXPLICIT_MISS;
4454 }
4455
d39ec23d
JP
4456 struct frozen_state state = {
4457 .table_id = ctx->table_id,
4458 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4459 .stack = ctx->stack.data,
4460 .stack_size = ctx->stack.size,
4461 .mirrors = ctx->mirrors,
4462 .conntracked = ctx->conntracked,
4463 .ofpacts = NULL,
4464 .ofpacts_len = 0,
4465 .action_set = NULL,
4466 .action_set_len = 0,
4467 .userdata = CONST_CAST(uint8_t *, userdata),
4468 .userdata_len = userdata_len,
9bfe9334 4469 };
d39ec23d 4470 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
9583bc14 4471
d39ec23d
JP
4472 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4473 if (!recirc_id) {
4474 xlate_report_error(ctx, "Failed to allocate recirculation id");
4475 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4476 return;
4477 }
4478 recirc_refs_add(&ctx->xout->recircs, recirc_id);
df70a773 4479
d39ec23d
JP
4480 size_t offset;
4481 size_t ac_offset;
4482 uint32_t meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4483 if (meter_id != UINT32_MAX) {
4484 /* If a controller meter is configured, wrap the meter and userspace
4485 * actions in a 100% sample action, which acts as a clone. */
4486 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4487 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4488 UINT32_MAX);
4489 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4490 OVS_SAMPLE_ATTR_ACTIONS);
4491 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
df70a773 4492 }
3b4fff43 4493
d39ec23d
JP
4494 /* Generate the datapath flows even if we don't send the packet-in
4495 * so that debugging more closely represents normal state. */
74c4530d 4496 bool dont_send = false;
d39ec23d 4497 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
74c4530d 4498 dont_send = true;
d39ec23d 4499 }
74c4530d
JP
4500 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4501 reason, controller_id);
d39ec23d
JP
4502
4503 if (meter_id != UINT32_MAX) {
4504 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4505 nl_msg_end_nested(ctx->odp_actions, offset);
4506 }
9583bc14
EJ
4507}
4508
e6bc8e74
YHW
4509/* Creates a frozen state and allocates a unique recirc id for the given
4510 * state. Returns a non-zero recirc id if it is allocated successfully.
4511 * Returns 0 otherwise.
4512 */
4513static uint32_t
77ab5fd2
BP
4514finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4515{
1d361a81 4516 ovs_assert(ctx->freezing);
7bbdd84f 4517
1d361a81 4518 struct frozen_state state = {
07659514 4519 .table_id = table,
07a3cd5c 4520 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5c1b2314 4521 .stack = ctx->stack.data,
84cf3c1f 4522 .stack_size = ctx->stack.size,
29bae541 4523 .mirrors = ctx->mirrors,
07659514 4524 .conntracked = ctx->conntracked,
43e73536 4525 .xport_uuid = ctx->xin->xport_uuid,
1d361a81
BP
4526 .ofpacts = ctx->frozen_actions.data,
4527 .ofpacts_len = ctx->frozen_actions.size,
417509fa 4528 .action_set = ctx->action_set.data,
8a5fb3b4 4529 .action_set_len = ctx->action_set.size,
74c4530d
JP
4530 .userdata = ctx->pause ? CONST_CAST(uint8_t *, ctx->pause->userdata)
4531 : NULL,
4532 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
2082425c 4533 };
77ab5fd2 4534 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
2082425c 4535
74c4530d
JP
4536 /* Allocate a unique recirc id for the given metadata state in the
4537 * flow. An existing id, with a new reference to the corresponding
4538 * recirculation context, will be returned if possible.
4539 * The life-cycle of this recirc id is managed by associating it
4540 * with the udpif key ('ukey') created for each new datapath flow. */
4541 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4542 if (!recirc_id) {
4543 xlate_report_error(ctx, "Failed to allocate recirculation id");
4544 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4545 return 0;
4546 }
4547 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4548
77ab5fd2 4549 if (ctx->pause) {
74c4530d 4550 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
e6bc8e74 4551 return 0;
77ab5fd2 4552 }
7bbdd84f 4553
74c4530d
JP
4554 put_controller_user_action(ctx, false, true, recirc_id,
4555 ctx->pause->max_len,
4556 ctx->pause->reason,
4557 ctx->pause->controller_id);
4558 } else {
53cc166a
JR
4559 if (ctx->recirc_update_dp_hash) {
4560 struct ovs_action_hash *act_hash;
4561
4562 /* Hash action. */
4563 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4564 OVS_ACTION_ATTR_HASH,
4565 sizeof *act_hash);
4566 act_hash->hash_alg = OVS_HASH_ALG_L4; /* Make configurable. */
4567 act_hash->hash_basis = 0; /* Make configurable. */
4568 }
74c4530d 4569 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
77ab5fd2 4570 }
e672ff9b 4571
1d361a81
BP
4572 /* Undo changes done by freezing. */
4573 ctx_cancel_freeze(ctx);
74c4530d 4574 return recirc_id;
7bbdd84f
SH
4575}
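/* Example (illustrative; the recirc id is made up): freezing typically
 * ends the datapath action list with recirc(0x3). The frozen state
 * built above (table id, stack, action set, conntrack flag, ...) is
 * stored under recirc id 0x3 and restored when the recirculated packet
 * re-enters translation. */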
4576
1d361a81 4577/* Called only when we're freezing. */
07659514 4578static void
77ab5fd2 4579finish_freezing(struct xlate_ctx *ctx)
07659514
JS
4580{
4581 xlate_commit_actions(ctx);
77ab5fd2 4582 finish_freezing__(ctx, 0);
07659514
JS
4583}
4584
e37b8437
JS
4585/* Fork the pipeline here. The current packet will continue processing the
4586 * current action list. A clone of the current packet will recirculate, skip
4587 * the remainder of the current action list and asynchronously resume pipeline
4588 * processing in 'table' with the current metadata and action set. */
4589static void
5fdd80cc
YHW
4590compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
4591 const uint16_t zone)
e37b8437 4592{
e6bc8e74 4593 uint32_t recirc_id;
1d361a81 4594 ctx->freezing = true;
e6bc8e74
YHW
4595 recirc_id = finish_freezing__(ctx, table);
4596
4597 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4598 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4599 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
5fdd80cc 4600 ctx->xin->packet, recirc_id, zone)) {
e6bc8e74
YHW
4601 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4602 "recirculate. The forked pipeline will be resumed at "
4603 "table %u.", table);
4604 } else {
4605 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4606 "forked pipeline with recirc_id = %d.", recirc_id);
4607 }
4608 }
e37b8437
JS
4609}
4610
8bfd0fda
BP
4611static void
4612compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
9583bc14 4613{
33bf9176 4614 struct flow *flow = &ctx->xin->flow;
8bfd0fda 4615 int n;
33bf9176 4616
8bfd0fda 4617 ovs_assert(eth_type_mpls(mpls->ethertype));
b0a17866 4618
49a73e0c 4619 n = flow_count_mpls_labels(flow, ctx->wc);
8bfd0fda 4620 if (!n) {
704bb0bf 4621 xlate_commit_actions(ctx);
8bfd0fda
BP
4622 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4623 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4624 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4625 "action can't be performed as it would have "
4626 "more MPLS LSEs than the %d supported.",
4627 FLOW_MAX_MPLS_LABELS);
9583bc14 4628 }
fff1b9c0 4629 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
8bfd0fda 4630 return;
9583bc14 4631 }
b0a17866 4632
742c0ac3
JR
4633 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4634 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
9583bc14
EJ
4635}
4636
8bfd0fda 4637static void
9cfef3d0 4638compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 4639{
8bfd0fda 4640 struct flow *flow = &ctx->xin->flow;
49a73e0c 4641 int n = flow_count_mpls_labels(flow, ctx->wc);
33bf9176 4642
49a73e0c 4643 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
8bf009bf 4644 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
e12ec36b 4645 ctx->was_mpls = true;
7bbdd84f
SH
4646 }
4647 } else if (n >= FLOW_MAX_MPLS_LABELS) {
8bfd0fda 4648 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4649 xlate_report_error(ctx, "dropping packet on which an "
4650 "MPLS pop action can't be performed as it has "
4651 "more MPLS LSEs than the %d supported.",
4652 FLOW_MAX_MPLS_LABELS);
8bfd0fda 4653 }
fff1b9c0 4654 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
1520ef4f 4655 ofpbuf_clear(ctx->odp_actions);
9583bc14
EJ
4656 }
4657}
4658
4659static bool
4660compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4661{
33bf9176
BP
4662 struct flow *flow = &ctx->xin->flow;
4663
4664 if (!is_ip_any(flow)) {
9583bc14
EJ
4665 return false;
4666 }
4667
49a73e0c 4668 ctx->wc->masks.nw_ttl = 0xff;
33bf9176
BP
4669 if (flow->nw_ttl > 1) {
4670 flow->nw_ttl--;
9583bc14
EJ
4671 return false;
4672 } else {
4673 size_t i;
4674
4675 for (i = 0; i < ids->n_controllers; i++) {
d39ec23d
JP
4676 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4677 ids->cnt_ids[i], NULL, 0);
9583bc14
EJ
4678 }
4679
4680 /* Stop processing for current table. */
2d9b49dd
BP
4681 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4682 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
9583bc14
EJ
4683 return true;
4684 }
4685}
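/* Example (illustrative, using documented OpenFlow action syntax):
 * with actions=dec_ttl(1,3), a packet whose IP TTL is greater than 1
 * is decremented and continues through the pipeline; a packet with
 * TTL <= 1 is instead copied to controller connections 1 and 3 with
 * reason OFPR_INVALID_TTL, and processing stops for this table. */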
4686
8bfd0fda 4687static void
097d4939
JR
4688compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
4689{
8bfd0fda 4690 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4691 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
8bfd0fda 4692 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
097d4939 4693 }
097d4939
JR
4694}
4695
8bfd0fda 4696static void
097d4939
JR
4697compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
4698{
8bfd0fda 4699 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4700 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
8bfd0fda 4701 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
097d4939 4702 }
097d4939
JR
4703}
4704
491e05c2
YY
4705static bool
4706compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
4707{
4708 struct flow *flow = &ctx->xin->flow;
4709
4710 if ((flow->packet_type == htonl(PT_NSH)) ||
4711 (flow->dl_type == htons(ETH_TYPE_NSH))) {
4712 ctx->wc->masks.nsh.ttl = 0xff;
4713 if (flow->nsh.ttl > 1) {
4714 flow->nsh.ttl--;
4715 return false;
4716 } else {
4717 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4718 0, NULL, 0);
4719 }
4720 }
4721
4722 /* Stop processing for current table. */
4723 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
4724 return true;
4725}
4726
8bfd0fda 4727static void
9cfef3d0 4728compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
9583bc14 4729{
8bfd0fda 4730 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4731 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda 4732 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
b0a17866 4733 }
9583bc14
EJ
4734}
4735
4736static bool
9cfef3d0 4737compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
9583bc14 4738{
33bf9176 4739 struct flow *flow = &ctx->xin->flow;
1dd35f8a 4740
8bfd0fda 4741 if (eth_type_mpls(flow->dl_type)) {
22d38fca
JR
4742 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
4743
49a73e0c 4744 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda
BP
4745 if (ttl > 1) {
4746 ttl--;
4747 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
4748 return false;
4749 } else {
d39ec23d
JP
4750 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
4751 NULL, 0);
8bfd0fda 4752 }
9583bc14 4753 }
22d38fca
JR
4754
4755 /* Stop processing for current table. */
2d9b49dd 4756 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
22d38fca 4757 return true;
9583bc14
EJ
4758}
4759
8b496c72
BP
4760/* Emits an action that outputs to 'port', within 'ctx'.
4761 *
4762 * 'controller_len' affects only packets sent to an OpenFlow controller. It
4763 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
4764 * send the whole packet (and 0 means to omit the packet entirely).
4765 *
4766 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
4767 * controller. If it is false, then the packet is never sent to the OpenFlow
4768 * controller.
4769 *
4770 * 'is_last_action' should be true if this output is the last OpenFlow action
4771 * to be processed, which enables certain optimizations.
4772 *
4773 * 'truncate' should be true if the packet to be output is being truncated,
4774 * which suppresses certain optimizations. */
9583bc14 4775static void
8b496c72
BP
4776xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
4777 uint16_t controller_len, bool may_packet_in,
4778 bool is_last_action, bool truncate)
9583bc14 4779{
2031ef97 4780 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
9583bc14 4781
2031ef97 4782 ctx->nf_output_iface = NF_OUT_DROP;
9583bc14
EJ
4783
4784 switch (port) {
4785 case OFPP_IN_PORT:
feee58b9 4786 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
8bbbda3c 4787 is_last_action, truncate);
9583bc14
EJ
4788 break;
4789 case OFPP_TABLE:
4e022ec0 4790 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
11938578 4791 0, may_packet_in, true, false, false,
96c3a6e5 4792 do_xlate_actions);
9583bc14
EJ
4793 break;
4794 case OFPP_NORMAL:
4795 xlate_normal(ctx);
4796 break;
4797 case OFPP_FLOOD:
feee58b9 4798 flood_packets(ctx, false, is_last_action);
9583bc14
EJ
4799 break;
4800 case OFPP_ALL:
feee58b9 4801 flood_packets(ctx, true, is_last_action);
9583bc14
EJ
4802 break;
4803 case OFPP_CONTROLLER:
d39ec23d
JP
4804 xlate_controller_action(ctx, controller_len,
4805 (ctx->in_packet_out ? OFPR_PACKET_OUT
4806 : ctx->in_group ? OFPR_GROUP
4807 : ctx->in_action_set ? OFPR_ACTION_SET
4808 : OFPR_ACTION),
4809 0, NULL, 0);
9583bc14
EJ
4810 break;
4811 case OFPP_NONE:
4812 break;
4813 case OFPP_LOCAL:
4814 default:
4e022ec0 4815 if (port != ctx->xin->flow.in_port.ofp_port) {
8bbbda3c 4816 compose_output_action(ctx, port, NULL, is_last_action, truncate);
9583bc14 4817 } else {
2d9b49dd 4818 xlate_report(ctx, OFT_WARN, "skipping output to input port");
9583bc14
EJ
4819 }
4820 break;
4821 }
4822
4823 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2031ef97
BP
4824 ctx->nf_output_iface = NF_OUT_FLOOD;
4825 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4826 ctx->nf_output_iface = prev_nf_output_iface;
9583bc14 4827 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2031ef97
BP
4828 ctx->nf_output_iface != NF_OUT_FLOOD) {
4829 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
4830 }
4831}
4832
4833static void
4834xlate_output_reg_action(struct xlate_ctx *ctx,
feee58b9
AZ
4835 const struct ofpact_output_reg *or,
4836 bool is_last_action)
9583bc14
EJ
4837{
4838 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
4839 if (port <= UINT16_MAX) {
2d9b49dd
BP
4840 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
4841
9583bc14
EJ
4842 union mf_subvalue value;
4843
4844 memset(&value, 0xff, sizeof value);
49a73e0c 4845 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
8b496c72
BP
4846 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
4847 false, is_last_action, false);
2d9b49dd
BP
4848 } else {
4849 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
4850 port);
9583bc14
EJ
4851 }
4852}
4853
aaca4fe0
WT
4854static void
4855xlate_output_trunc_action(struct xlate_ctx *ctx,
feee58b9
AZ
4856 ofp_port_t port, uint32_t max_len,
4857 bool is_last_action)
aaca4fe0
WT
4858{
4859 bool support_trunc = ctx->xbridge->support.trunc;
4860 struct ovs_action_trunc *trunc;
2f2b904f 4861 char name[OFP10_MAX_PORT_NAME_LEN];
aaca4fe0
WT
4862
4863 switch (port) {
4864 case OFPP_TABLE:
4865 case OFPP_NORMAL:
4866 case OFPP_FLOOD:
4867 case OFPP_ALL:
4868 case OFPP_CONTROLLER:
4869 case OFPP_NONE:
50f96b10 4870 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
4871 xlate_report(ctx, OFT_WARN,
4872 "output_trunc does not support port: %s", name);
aaca4fe0
WT
4873 break;
4874 case OFPP_LOCAL:
4875 case OFPP_IN_PORT:
4876 default:
4877 if (port != ctx->xin->flow.in_port.ofp_port) {
4878 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
4879
4880 if (xport == NULL || xport->odp_port == ODPP_NONE) {
4881 /* Since truncation happens at the following output action, if
4882 * the output port is a patch port, the behavior is somewhat
49f17344 4883 * unpredictable. For simplicity, disallow this case. */
50f96b10 4884 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
4885 xlate_report_error(ctx, "output_trunc does not support "
4886 "patch port %s", name);
aaca4fe0
WT
4887 break;
4888 }
4889
4890 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4891 OVS_ACTION_ATTR_TRUNC,
4892 sizeof *trunc);
4893 trunc->max_len = max_len;
8b496c72 4894 xlate_output_action(ctx, port, 0, false, is_last_action, true);
aaca4fe0
WT
4895 if (!support_trunc) {
4896 ctx->xout->slow |= SLOW_ACTION;
4897 }
4898 } else {
2d9b49dd 4899 xlate_report(ctx, OFT_WARN, "skipping output to input port");
aaca4fe0
WT
4900 }
4901 break;
4902 }
4903}
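/* Example (illustrative; port numbers are made up): the OpenFlow
 * action output(port=2,max_len=100) becomes the datapath actions
 * trunc(100),2. On datapaths without trunc support the action is
 * still composed, but SLOW_ACTION forces the packet through userspace,
 * where truncation can be applied. */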
4904
9583bc14
EJ
4905static void
4906xlate_enqueue_action(struct xlate_ctx *ctx,
feee58b9
AZ
4907 const struct ofpact_enqueue *enqueue,
4908 bool is_last_action)
9583bc14 4909{
4e022ec0 4910 ofp_port_t ofp_port = enqueue->port;
9583bc14
EJ
4911 uint32_t queue_id = enqueue->queue;
4912 uint32_t flow_priority, priority;
4913 int error;
4914
4915 /* Translate queue to priority. */
89a8a7f0 4916 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
9583bc14
EJ
4917 if (error) {
4918 /* Fall back to ordinary output action. */
8b496c72
BP
4919 xlate_output_action(ctx, enqueue->port, 0, false,
4920 is_last_action, false);
9583bc14
EJ
4921 return;
4922 }
4923
4924 /* Check output port. */
4925 if (ofp_port == OFPP_IN_PORT) {
4e022ec0
AW
4926 ofp_port = ctx->xin->flow.in_port.ofp_port;
4927 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
4928 return;
4929 }
4930
4931 /* Add datapath actions. */
4932 flow_priority = ctx->xin->flow.skb_priority;
4933 ctx->xin->flow.skb_priority = priority;
11938578 4934 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
9583bc14
EJ
4935 ctx->xin->flow.skb_priority = flow_priority;
4936
4937 /* Update NetFlow output port. */
2031ef97
BP
4938 if (ctx->nf_output_iface == NF_OUT_DROP) {
4939 ctx->nf_output_iface = ofp_port;
4940 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
4941 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
4942 }
4943}
4944
4945static void
4946xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
4947{
4948 uint32_t skb_priority;
4949
89a8a7f0 4950 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
9583bc14
EJ
4951 ctx->xin->flow.skb_priority = skb_priority;
4952 } else {
4953 /* Couldn't translate queue to a priority. Nothing to do. A warning
4954 * has already been logged. */
4955 }
4956}
4957
4958static bool
46c88433 4959slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
9583bc14 4960{
46c88433
EJ
4961 const struct xbridge *xbridge = xbridge_;
4962 struct xport *port;
9583bc14
EJ
4963
4964 switch (ofp_port) {
4965 case OFPP_IN_PORT:
4966 case OFPP_TABLE:
4967 case OFPP_NORMAL:
4968 case OFPP_FLOOD:
4969 case OFPP_ALL:
4970 case OFPP_NONE:
4971 return true;
4972 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
4973 return false;
4974 default:
46c88433 4975 port = get_ofp_port(xbridge, ofp_port);
9583bc14
EJ
4976 return port ? port->may_enable : false;
4977 }
4978}
4979
4980static void
4981xlate_bundle_action(struct xlate_ctx *ctx,
feee58b9
AZ
4982 const struct ofpact_bundle *bundle,
4983 bool is_last_action)
9583bc14 4984{
4e022ec0 4985 ofp_port_t port;
9583bc14 4986
49a73e0c 4987 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
46c88433 4988 CONST_CAST(struct xbridge *, ctx->xbridge));
9583bc14 4989 if (bundle->dst.field) {
49a73e0c 4990 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
2d9b49dd 4991 xlate_report_subfield(ctx, &bundle->dst);
9583bc14 4992 } else {
8b496c72 4993 xlate_output_action(ctx, port, 0, false, is_last_action, false);
9583bc14
EJ
4994 }
4995}
4996
4165b5e0
JS
4997static void
4998xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
4999{
49a73e0c 5000 learn_mask(learn, ctx->wc);
9583bc14 5001
df70a773 5002 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4165b5e0
JS
5003 uint64_t ofpacts_stub[1024 / 8];
5004 struct ofputil_flow_mod fm;
2c7ee524 5005 struct ofproto_flow_mod ofm__, *ofm;
4165b5e0 5006 struct ofpbuf ofpacts;
2c7ee524
JR
5007 enum ofperr error;
5008
5009 if (ctx->xin->xcache) {
3f3b97b0 5010 ofm = xmalloc(sizeof *ofm);
2c7ee524
JR
5011 } else {
5012 ofm = &ofm__;
5013 }
4165b5e0
JS
5014
5015 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
2c7ee524 5016 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
2d9b49dd
BP
5017 if (OVS_UNLIKELY(ctx->xin->trace)) {
5018 struct ds s = DS_EMPTY_INITIALIZER;
5019 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
50f96b10 5020 match_format(&fm.match, NULL, &s, OFP_DEFAULT_PRIORITY);
2d9b49dd
BP
5021 ds_chomp(&s, ' ');
5022 ds_put_format(&s, " priority=%d", fm.priority);
5023 if (fm.new_cookie) {
5024 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5025 }
5026 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5027 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5028 }
5029 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5030 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5031 }
5032 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5033 ds_put_cstr(&s, " send_flow_rem");
5034 }
5035 ds_put_cstr(&s, " actions=");
50f96b10 5036 ofpacts_format(fm.ofpacts, fm.ofpacts_len, NULL, &s);
2d9b49dd
BP
5037 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5038 ds_destroy(&s);
5039 }
2c7ee524
JR
5040 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5041 &fm, ofm);
4165b5e0 5042 ofpbuf_uninit(&ofpacts);
2c7ee524 5043
3f3b97b0 5044 if (!error) {
4c71600d 5045 bool success = true;
3f3b97b0 5046 if (ctx->xin->allow_side_effects) {
4c71600d
DDP
5047 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5048 learn->limit, &success);
5049 } else if (learn->limit) {
5050 if (!ofm->temp_rule
5051 || ofm->temp_rule->state != RULE_INSERTED) {
5052 /* The learned rule expired and there are no packets, so
5053 * we cannot learn again. Since the translated actions
5054 * depend on the result of learning, we tell the caller
5055 * that there's no point in caching this result. */
5056 ctx->xout->avoid_caching = true;
5057 }
3f3b97b0
DDP
5058 }
5059
4c71600d
DDP
5060 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5061 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5062 &ctx->xin->flow, ctx->wc);
5063 xlate_report_subfield(ctx, &learn->result_dst);
5064 }
5065
5066 if (success && ctx->xin->xcache) {
3f3b97b0
DDP
5067 struct xc_entry *entry;
5068
5069 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5070 entry->learn.ofm = ofm;
4c71600d 5071 entry->learn.limit = learn->limit;
3f3b97b0 5072 ofm = NULL;
1bddcb5d
YS
5073 } else {
5074 ofproto_flow_mod_uninit(ofm);
3f3b97b0 5075 }
4c71600d
DDP
5076
5077 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5078 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5079 }
3f3b97b0
DDP
5080 }
5081
3f76c123 5082 if (ofm != &ofm__) {
3f3b97b0 5083 free(ofm);
2c7ee524
JR
5084 }
5085
5086 if (error) {
2d9b49dd
BP
5087 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5088 ofperr_to_string(error));
2c7ee524 5089 }
2d9b49dd
BP
5090 } else {
5091 xlate_report(ctx, OFT_WARN,
5092 "suppressing side effects, so learn action ignored");
b256dc52
JS
5093 }
5094}
5095
5096static void
5097xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5098 uint16_t idle_timeout, uint16_t hard_timeout)
5099{
5100 if (tcp_flags & (TCP_FIN | TCP_RST)) {
07a3cd5c 5101 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
b256dc52 5102 }
9583bc14
EJ
5103}
5104
9583bc14
EJ
5105static void
5106xlate_fin_timeout(struct xlate_ctx *ctx,
5107 const struct ofpact_fin_timeout *oft)
5108{
b256dc52 5109 if (ctx->rule) {
df70a773
JR
5110 if (ctx->xin->allow_side_effects) {
5111 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5112 oft->fin_idle_timeout, oft->fin_hard_timeout);
5113 }
b256dc52
JS
5114 if (ctx->xin->xcache) {
5115 struct xc_entry *entry;
5116
5117 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
83709dfa
JR
5118 /* XC_RULE already holds a reference on the rule, none is taken
5119 * here. */
901a517e
JR
5120 entry->fin.rule = ctx->rule;
5121 entry->fin.idle = oft->fin_idle_timeout;
5122 entry->fin.hard = oft->fin_hard_timeout;
b256dc52 5123 }
9583bc14
EJ
5124 }
5125}
5126
5127static void
5128xlate_sample_action(struct xlate_ctx *ctx,
5129 const struct ofpact_sample *os)
5130{
f69f713b
BY
5131 odp_port_t output_odp_port = ODPP_NONE;
5132 odp_port_t tunnel_out_port = ODPP_NONE;
5133 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5134 bool emit_set_tunnel = false;
5135
5136 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5137 return;
5138 }
5139
e824d78d
JR
5140 /* Scale the probability from 16-bit to 32-bit while representing
5141 * the same percentage. */
5142 uint32_t probability = (os->probability << 16) | os->probability;
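/* E.g. 0x8000 becomes 0x80008000. The scaling is exact, since
 * ((p << 16) | p) == p * 65537 and UINT32_MAX == 65535 * 65537, so
 * p / UINT16_MAX == ((p << 16) | p) / UINT32_MAX. */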
5143
f69f713b
BY
5144 /* If the sampling port in the flow sample action is equal to the
5145 * input ofp_port, this sample action is an input port action. */
5146 if (os->sampling_port != OFPP_NONE &&
5147 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5148 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5149 os->sampling_port);
5150 if (output_odp_port == ODPP_NONE) {
2d9b49dd
BP
5151 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5152 "action", os->sampling_port);
f69f713b
BY
5153 return;
5154 }
5155
5156 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5157 os->collector_set_id)
cd32509e 5158 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
f69f713b
BY
5159 tunnel_out_port = output_odp_port;
5160 emit_set_tunnel = true;
5161 }
5162 }
5163
5164 xlate_commit_actions(ctx);
5165 /* If 'emit_set_tunnel' is true, a sample(sampling_port=1) action
5166 * translates into the datapath actions set(tunnel(...)),sample(...),
5167 * which are used for sampling egress tunnel information. */
5168 if (emit_set_tunnel) {
5169 const struct xport *xport = get_ofp_port(ctx->xbridge,
5170 os->sampling_port);
5171
5172 if (xport && xport->is_tunnel) {
5173 struct flow *flow = &ctx->xin->flow;
5174 tnl_port_send(xport->ofport, flow, ctx->wc);
5175 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5176 struct flow_tnl flow_tnl = flow->tunnel;
5177
5178 commit_odp_tunnel_action(flow, &ctx->base_flow,
5179 ctx->odp_actions);
5180 flow->tunnel = flow_tnl;
5181 }
5182 } else {
2d9b49dd
BP
5183 xlate_report_error(ctx,
5184 "sampling_port:%d should be a tunnel port.",
5185 os->sampling_port);
f69f713b
BY
5186 }
5187 }
e824d78d 5188
8de6ff3e
JP
5189 struct user_action_cookie cookie = {
5190 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
fcb9579b
JP
5191 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
5192 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
a6092018 5193 .flow_sample = {
a6092018
BP
5194 .probability = os->probability,
5195 .collector_set_id = os->collector_set_id,
5196 .obs_domain_id = os->obs_domain_id,
5197 .obs_point_id = os->obs_point_id,
f69f713b 5198 .output_odp_port = output_odp_port,
4930ea56 5199 .direction = os->direction,
a6092018
BP
5200 }
5201 };
8de6ff3e 5202 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
9583bc14
EJ
5203}
5204
eee69393
AZ
5205/* Determine if a datapath action translated from an OpenFlow action
5206 * can be reversed by another datapath action.
5207 *
5208 * OpenFlow actions that do not emit datapath actions are trivially
5209 * reversible. Reversibility of other actions depends on the nature of
5210 * the action and its translation. */
5211static bool
5212reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
bef503e8 5213{
eee69393 5214 const struct ofpact *a;
bef503e8 5215
eee69393
AZ
5216 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5217 switch (a->type) {
5218 case OFPACT_BUNDLE:
5219 case OFPACT_CLEAR_ACTIONS:
5220 case OFPACT_CLONE:
5221 case OFPACT_CONJUNCTION:
5222 case OFPACT_CONTROLLER:
5223 case OFPACT_CT_CLEAR:
5224 case OFPACT_DEBUG_RECIRC:
a934a3dd 5225 case OFPACT_DEBUG_SLOW:
eee69393
AZ
5226 case OFPACT_DEC_MPLS_TTL:
5227 case OFPACT_DEC_TTL:
5228 case OFPACT_ENQUEUE:
5229 case OFPACT_EXIT:
5230 case OFPACT_FIN_TIMEOUT:
5231 case OFPACT_GOTO_TABLE:
5232 case OFPACT_GROUP:
5233 case OFPACT_LEARN:
5234 case OFPACT_MULTIPATH:
5235 case OFPACT_NOTE:
5236 case OFPACT_OUTPUT:
5237 case OFPACT_OUTPUT_REG:
5238 case OFPACT_POP_MPLS:
5239 case OFPACT_POP_QUEUE:
5240 case OFPACT_PUSH_MPLS:
5241 case OFPACT_PUSH_VLAN:
5242 case OFPACT_REG_MOVE:
5243 case OFPACT_RESUBMIT:
5244 case OFPACT_SAMPLE:
5245 case OFPACT_SET_ETH_DST:
5246 case OFPACT_SET_ETH_SRC:
5247 case OFPACT_SET_FIELD:
5248 case OFPACT_SET_IP_DSCP:
5249 case OFPACT_SET_IP_ECN:
5250 case OFPACT_SET_IP_TTL:
5251 case OFPACT_SET_IPV4_DST:
5252 case OFPACT_SET_IPV4_SRC:
5253 case OFPACT_SET_L4_DST_PORT:
5254 case OFPACT_SET_L4_SRC_PORT:
5255 case OFPACT_SET_MPLS_LABEL:
5256 case OFPACT_SET_MPLS_TC:
5257 case OFPACT_SET_MPLS_TTL:
5258 case OFPACT_SET_QUEUE:
5259 case OFPACT_SET_TUNNEL:
5260 case OFPACT_SET_VLAN_PCP:
5261 case OFPACT_SET_VLAN_VID:
5262 case OFPACT_STACK_POP:
5263 case OFPACT_STACK_PUSH:
5264 case OFPACT_STRIP_VLAN:
5265 case OFPACT_UNROLL_XLATE:
5266 case OFPACT_WRITE_ACTIONS:
5267 case OFPACT_WRITE_METADATA:
5268 break;
5269
5270 case OFPACT_CT:
5271 case OFPACT_METER:
5272 case OFPACT_NAT:
5273 case OFPACT_OUTPUT_TRUNC:
2142be1f
BP
5274 case OFPACT_ENCAP:
5275 case OFPACT_DECAP:
491e05c2 5276 case OFPACT_DEC_NSH_TTL:
eee69393 5277 return false;
9c2a44dc 5278 }
456024cb 5279 }
eee69393 5280 return true;
bef503e8
AZ
5281}
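/* Example: a clone body containing only set_field and output actions
 * is reversible, so clone_xlate_actions() below translates it in line
 * and simply restores the old flow afterwards; a body containing ct,
 * meter, or encap/decap is not, and must be wrapped in a datapath
 * clone() (or 100% sample()) action instead. */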
5282
5283static void
c9f0a445 5284clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
feee58b9 5285 struct xlate_ctx *ctx, bool is_last_action)
7ae62a67 5286{
b827b231
BP
5287 struct ofpbuf old_stack = ctx->stack;
5288 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5289 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5290 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5291
5292 struct ofpbuf old_action_set = ctx->action_set;
5293 uint64_t actset_stub[1024 / 8];
5294 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5295 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5296
eee69393 5297 size_t offset, ac_offset;
eee69393
AZ
5298 struct flow old_flow = ctx->xin->flow;
5299
feee58b9 5300 if (reversible_actions(actions, actions_len) || is_last_action) {
eee69393 5301 old_flow = ctx->xin->flow;
feee58b9 5302 do_xlate_actions(actions, actions_len, ctx, is_last_action);
058d3a4a
EG
5303 if (!ctx->freezing) {
5304 xlate_action_set(ctx);
5305 }
60eebf12
AZ
5306 if (ctx->freezing) {
5307 finish_freezing(ctx);
5308 }
eee69393
AZ
5309 goto xlate_done;
5310 }
5311
5312 /* Commit datapath actions before emitting the clone action to
5313 * avoid emitting those actions twice. Once inside
5314 * the clone, another time for the action after clone. */
5315 xlate_commit_actions(ctx);
9c2a44dc 5316 struct flow old_base = ctx->base_flow;
eee69393
AZ
5317 bool old_was_mpls = ctx->was_mpls;
5318 bool old_conntracked = ctx->conntracked;
ba653d2a 5319
eee69393
AZ
5320 /* The actions are not reversible; a datapath clone action is
5321 * required to encode the translation. Select the clone action
5322 * based on datapath capabilities. */
5323 if (ctx->xbridge->support.clone) {
5324 /* Use the clone action as the datapath clone. */
5325 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
feee58b9 5326 do_xlate_actions(actions, actions_len, ctx, true);
058d3a4a
EG
5327 if (!ctx->freezing) {
5328 xlate_action_set(ctx);
5329 }
60eebf12
AZ
5330 if (ctx->freezing) {
5331 finish_freezing(ctx);
5332 }
eee69393
AZ
5333 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5334 goto dp_clone_done;
5335 }
b827b231 5336
eee69393
AZ
5337 if (ctx->xbridge->support.sample_nesting > 3) {
5338 /* Use sample action as datapath clone. */
5339 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5340 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5341 OVS_SAMPLE_ATTR_ACTIONS);
feee58b9 5342 do_xlate_actions(actions, actions_len, ctx, true);
058d3a4a
EG
5343 if (!ctx->freezing) {
5344 xlate_action_set(ctx);
5345 }
60eebf12
AZ
5346 if (ctx->freezing) {
5347 finish_freezing(ctx);
5348 }
eee69393
AZ
5349 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5350 nl_msg_cancel_nested(ctx->odp_actions, offset);
5351 } else {
5352 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5353 UINT32_MAX); /* 100% probability. */
5354 nl_msg_end_nested(ctx->odp_actions, offset);
5355 }
5356 goto dp_clone_done;
5357 }
5358
5359 /* The datapath does not support clone; skip translating the nested
5360 * actions and report an error. */
5361 xlate_report_error(ctx, "Failed to compose clone action");
ba653d2a 5362
eee69393 5363dp_clone_done:
ba653d2a
BP
5364 /* The clone's conntrack execution should have no effect on the original
5365 * packet. */
5366 ctx->conntracked = old_conntracked;
bd3c2df3
BP
5367
5368 /* Popping MPLS from the clone should have no effect on the original
5369 * packet. */
5370 ctx->was_mpls = old_was_mpls;
eee69393
AZ
5371
5372 /* Restore the 'base_flow' for the next action. */
5373 ctx->base_flow = old_base;
5374
5375xlate_done:
5376 ofpbuf_uninit(&ctx->action_set);
5377 ctx->action_set = old_action_set;
5378 ofpbuf_uninit(&ctx->stack);
5379 ctx->stack = old_stack;
5380 ctx->xin->flow = old_flow;
7ae62a67
WT
5381}
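/* Example (illustrative; the port number is made up): the OpenFlow
 * action clone(ct(commit),output:2) becomes the datapath action
 * clone(ct(commit),2) when OVS_ACTION_ATTR_CLONE is available, or
 * sample(sample=100.0%,actions(ct(commit),2)) on datapaths that only
 * support sufficiently nested sample() actions. */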
5382
c9f0a445 5383static void
feee58b9
AZ
5384compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5385 bool is_last_action)
c9f0a445
AZ
5386{
5387 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5388
feee58b9 5389 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action);
c9f0a445
AZ
5390}
5391
076caa2f
JR
5392static void
5393xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5394{
5395 if (meter->provider_meter_id != UINT32_MAX) {
5396 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5397 meter->provider_meter_id);
5398 }
5399}
5400
9583bc14 5401static bool
46c88433 5402may_receive(const struct xport *xport, struct xlate_ctx *ctx)
9583bc14 5403{
bbbca389 5404 if (xport->config & (is_stp(&ctx->xin->flow)
46c88433
EJ
5405 ? OFPUTIL_PC_NO_RECV_STP
5406 : OFPUTIL_PC_NO_RECV)) {
9583bc14
EJ
5407 return false;
5408 }
5409
5410 /* Only drop packets here if both forwarding and learning are
5411 * disabled. If just learning is enabled, we need to let
5412 * OFPP_NORMAL and the learning action have a look at the packet
5413 * before we can drop it. */
9efd308e
DV
5414 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5415 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
9583bc14
EJ
5416 return false;
5417 }
5418
5419 return true;
5420}
5421
7fdb60a7 5422static void
7e7e8dbb
BP
5423xlate_write_actions__(struct xlate_ctx *ctx,
5424 const struct ofpact *ofpacts, size_t ofpacts_len)
7fdb60a7 5425{
c61f3870
BP
5426 /* Maintain actset_output depending on the contents of the action set:
5427 *
5428 * - OFPP_UNSET, if there is no "output" action.
5429 *
5430 * - The output port, if there is an "output" action and no "group"
5431 * action.
5432 *
5433 * - OFPP_UNSET, if there is a "group" action.
5434 */
5435 if (!ctx->action_set_has_group) {
7e7e8dbb
BP
5436 const struct ofpact *a;
5437 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5438 if (a->type == OFPACT_OUTPUT) {
5439 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5440 } else if (a->type == OFPACT_GROUP) {
c61f3870
BP
5441 ctx->xin->flow.actset_output = OFPP_UNSET;
5442 ctx->action_set_has_group = true;
9055ca9a 5443 break;
c61f3870
BP
5444 }
5445 }
5446 }
5447
7e7e8dbb
BP
5448 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5449}
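/* Example: after write_actions(output:1), actset_output is port 1;
 * after write_actions(group:2), actset_output is OFPP_UNSET even if
 * the action set also contains an output, because the group decides
 * the final output port only at execution time. */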
5450
5451static void
5452xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5453{
5454 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
7fdb60a7
SH
5455}
5456
5457static void
5458xlate_action_set(struct xlate_ctx *ctx)
5459{
2d9b49dd
BP
5460 uint64_t action_list_stub[1024 / 8];
5461 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
7fdb60a7 5462 ofpacts_execute_action_set(&action_list, &ctx->action_set);
ed9c9e3e
JR
5463 /* Clear the action set, as it is not needed any more. */
5464 ofpbuf_clear(&ctx->action_set);
2d9b49dd
BP
5465 if (action_list.size) {
5466 ctx->in_action_set = true;
5467
5468 struct ovs_list *old_trace = ctx->xin->trace;
5469 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5470 "--. Executing action set:");
feee58b9 5471 do_xlate_actions(action_list.data, action_list.size, ctx, true);
2d9b49dd
BP
5472 ctx->xin->trace = old_trace;
5473
5474 ctx->in_action_set = false;
5475 }
7fdb60a7
SH
5476 ofpbuf_uninit(&action_list);
5477}
5478
e672ff9b 5479static void
1d361a81 5480freeze_put_unroll_xlate(struct xlate_ctx *ctx)
e672ff9b 5481{
1d361a81 5482 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
e672ff9b
JR
5483
5484 /* Restore the table_id and rule cookie for a potential PACKET_IN
5485 * if needed. */
5486 if (!unroll ||
5487 (ctx->table_id != unroll->rule_table_id
5488 || ctx->rule_cookie != unroll->rule_cookie)) {
1d361a81 5489 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
e672ff9b
JR
5490 unroll->rule_table_id = ctx->table_id;
5491 unroll->rule_cookie = ctx->rule_cookie;
1d361a81 5492 ctx->frozen_actions.header = unroll;
e672ff9b
JR
5493 }
5494}
5495
5496
1d361a81
BP
5497/* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5498 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5499 * present, before any action that may depend on the current table ID or flow
5500 * cookie. */
e672ff9b 5501static void
1d361a81 5502freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
e672ff9b
JR
5503 struct xlate_ctx *ctx)
5504{
c2b283b7 5505 for (; a < end; a = ofpact_next(a)) {
e672ff9b 5506 switch (a->type) {
e672ff9b 5507 case OFPACT_OUTPUT_REG:
aaca4fe0 5508 case OFPACT_OUTPUT_TRUNC:
e672ff9b
JR
5509 case OFPACT_GROUP:
5510 case OFPACT_OUTPUT:
5511 case OFPACT_CONTROLLER:
5512 case OFPACT_DEC_MPLS_TTL:
491e05c2 5513 case OFPACT_DEC_NSH_TTL:
e672ff9b 5514 case OFPACT_DEC_TTL:
83a31283
BP
5515 /* These actions may generate asynchronous messages, which include
5516 * table ID and flow cookie information. */
1d361a81 5517 freeze_put_unroll_xlate(ctx);
e672ff9b
JR
5518 break;
5519
83a31283
BP
5520 case OFPACT_RESUBMIT:
5521 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5522 /* This resubmit action is relative to the current table, so we
5523 * need to track what table that is. */
1d361a81 5524 freeze_put_unroll_xlate(ctx);
83a31283
BP
5525 }
5526 break;
5527
e672ff9b
JR
5528 case OFPACT_SET_TUNNEL:
5529 case OFPACT_REG_MOVE:
5530 case OFPACT_SET_FIELD:
5531 case OFPACT_STACK_PUSH:
5532 case OFPACT_STACK_POP:
5533 case OFPACT_LEARN:
5534 case OFPACT_WRITE_METADATA:
83a31283 5535 case OFPACT_GOTO_TABLE:
e672ff9b
JR
5536 case OFPACT_ENQUEUE:
5537 case OFPACT_SET_VLAN_VID:
5538 case OFPACT_SET_VLAN_PCP:
5539 case OFPACT_STRIP_VLAN:
5540 case OFPACT_PUSH_VLAN:
5541 case OFPACT_SET_ETH_SRC:
5542 case OFPACT_SET_ETH_DST:
5543 case OFPACT_SET_IPV4_SRC:
5544 case OFPACT_SET_IPV4_DST:
5545 case OFPACT_SET_IP_DSCP:
5546 case OFPACT_SET_IP_ECN:
5547 case OFPACT_SET_IP_TTL:
5548 case OFPACT_SET_L4_SRC_PORT:
5549 case OFPACT_SET_L4_DST_PORT:
5550 case OFPACT_SET_QUEUE:
5551 case OFPACT_POP_QUEUE:
5552 case OFPACT_PUSH_MPLS:
5553 case OFPACT_POP_MPLS:
5554 case OFPACT_SET_MPLS_LABEL:
5555 case OFPACT_SET_MPLS_TC:
5556 case OFPACT_SET_MPLS_TTL:
5557 case OFPACT_MULTIPATH:
5558 case OFPACT_BUNDLE:
5559 case OFPACT_EXIT:
5560 case OFPACT_UNROLL_XLATE:
5561 case OFPACT_FIN_TIMEOUT:
5562 case OFPACT_CLEAR_ACTIONS:
5563 case OFPACT_WRITE_ACTIONS:
5564 case OFPACT_METER:
5565 case OFPACT_SAMPLE:
7ae62a67 5566 case OFPACT_CLONE:
f839892a
JS
5567 case OFPACT_ENCAP:
5568 case OFPACT_DECAP:
d4abaff5 5569 case OFPACT_DEBUG_RECIRC:
a934a3dd 5570 case OFPACT_DEBUG_SLOW:
07659514 5571 case OFPACT_CT:
72fe7578 5572 case OFPACT_CT_CLEAR:
9ac0aada 5573 case OFPACT_NAT:
83a31283 5574 /* These may not generate PACKET INs. */
e672ff9b
JR
5575 break;
5576
e672ff9b
JR
5577 case OFPACT_NOTE:
5578 case OFPACT_CONJUNCTION:
83a31283 5579 /* These need not be copied for restoration. */
e672ff9b
JR
5580 continue;
5581 }
5582 /* Copy the action over. */
1d361a81 5583 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
e672ff9b
JR
5584 }
5585}
5586
8e53fe8c 5587static void
f2d105b5
JS
5588put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5589 struct flow_wildcards *wc)
8e53fe8c 5590{
2a754f4a
JS
5591 if (wc->masks.ct_mark) {
5592 struct {
5593 uint32_t key;
5594 uint32_t mask;
5595 } *odp_ct_mark;
5596
5597 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5598 sizeof(*odp_ct_mark));
5599 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5600 odp_ct_mark->mask = wc->masks.ct_mark;
8e53fe8c
JS
5601 }
5602}
5603
9daf2348 5604static void
f2d105b5
JS
5605put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5606 struct flow_wildcards *wc)
9daf2348 5607{
2ff8484b 5608 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
9daf2348
JS
5609 struct {
5610 ovs_u128 key;
5611 ovs_u128 mask;
89cf41ec 5612 } odp_ct_label;
9daf2348 5613
89cf41ec
BP
5614 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
5615 odp_ct_label.mask = wc->masks.ct_label;
5616 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
5617 &odp_ct_label, sizeof odp_ct_label);
9daf2348
JS
5618 }
5619}
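/* Example (illustrative): a committed connection whose nested actions
 * wrote ct_mark bit 0 renders in the datapath as ct(commit,mark=0x1/0x1);
 * ct_label is emitted the same way as a 128-bit value/mask pair, with
 * the mask taken from exactly the bits the nested actions wrote. */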
5620
d787ad39 5621static void
2d9b49dd
BP
5622put_ct_helper(struct xlate_ctx *ctx,
5623 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
d787ad39
JS
5624{
5625 if (ofc->alg) {
40c7b2fc
JS
5626 switch(ofc->alg) {
5627 case IPPORT_FTP:
d787ad39 5628 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
40c7b2fc
JS
5629 break;
5630 case IPPORT_TFTP:
5631 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5632 break;
5633 default:
2d9b49dd 5634 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
40c7b2fc 5635 break;
d787ad39
JS
5636 }
5637 }
5638}
5639
9ac0aada
JR
5640static void
5641put_ct_nat(struct xlate_ctx *ctx)
5642{
5643 struct ofpact_nat *ofn = ctx->ct_nat_action;
5644 size_t nat_offset;
5645
5646 if (!ofn) {
5647 return;
5648 }
5649
5650 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5651 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5652 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5653 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5654 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5655 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5656 }
5657 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5658 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5659 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5660 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5661 }
5662 if (ofn->range_af == AF_INET) {
73e8bc23 5663 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
9ac0aada
JR
5664 ofn->range.addr.ipv4.min);
5665 if (ofn->range.addr.ipv4.max &&
73e8bc23
BP
5666 (ntohl(ofn->range.addr.ipv4.max)
5667 > ntohl(ofn->range.addr.ipv4.min))) {
5668 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5669 ofn->range.addr.ipv4.max);
9ac0aada
JR
5670 }
5671 } else if (ofn->range_af == AF_INET6) {
5672 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5673 &ofn->range.addr.ipv6.min,
5674 sizeof ofn->range.addr.ipv6.min);
5675 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
5676 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
5677 sizeof ofn->range.addr.ipv6.max) > 0) {
5678 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5679 &ofn->range.addr.ipv6.max,
5680 sizeof ofn->range.addr.ipv6.max);
5681 }
5682 }
5683 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
5684 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
5685 ofn->range.proto.min);
5686 if (ofn->range.proto.max &&
5687 ofn->range.proto.max > ofn->range.proto.min) {
5688 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
5689 ofn->range.proto.max);
5690 }
5691 }
5692 }
5693 nl_msg_end_nested(ctx->odp_actions, nat_offset);
5694}
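/* Example (illustrative; addresses and ports are made up): an OpenFlow
 * ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535)) action yields
 * a nested datapath nat() attribute carrying OVS_NAT_ATTR_SRC, the
 * IP_MIN/IP_MAX address range, and the PROTO_MIN/PROTO_MAX port range;
 * the persistent and protocol hash/random flags map one-to-one onto
 * the corresponding OVS_NAT_ATTR_* flags above. */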
5695
07659514 5696static void
feee58b9
AZ
5697compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
5698 bool is_last_action)
07659514 5699{
f2d105b5 5700 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
f2d105b5 5701 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
07659514
JS
5702 size_t ct_offset;
5703 uint16_t zone;
5704
5705 /* Ensure that any prior actions are applied before composing the new
5706 * conntrack action. */
5707 xlate_commit_actions(ctx);
5708
8e53fe8c 5709 /* Process nested actions first, to populate the key. */
9ac0aada 5710 ctx->ct_nat_action = NULL;
f2d105b5 5711 ctx->wc->masks.ct_mark = 0;
f6fabcc6 5712 ctx->wc->masks.ct_label = OVS_U128_ZERO;
feee58b9
AZ
5713 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
5714 is_last_action);
8e53fe8c 5715
07659514
JS
5716 if (ofc->zone_src.field) {
5717 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
5718 } else {
5719 zone = ofc->zone_imm;
5720 }
5721
5722 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
5723 if (ofc->flags & NX_CT_F_COMMIT) {
a76a37ef
JR
5724 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
5725 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
adfe7a0b
JR
5726 if (ctx->xbridge->support.ct_eventmask) {
5727 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
975954af 5728 OVS_CT_EVENTMASK_DEFAULT);
adfe7a0b 5729 }
07659514
JS
5730 }
5731 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
f2d105b5
JS
5732 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5733 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
2d9b49dd 5734 put_ct_helper(ctx, ctx->odp_actions, ofc);
9ac0aada
JR
5735 put_ct_nat(ctx);
5736 ctx->ct_nat_action = NULL;
07659514
JS
5737 nl_msg_end_nested(ctx->odp_actions, ct_offset);
5738
f2d105b5 5739 ctx->wc->masks.ct_mark = old_ct_mark_mask;
f2d105b5 5740 ctx->wc->masks.ct_label = old_ct_label_mask;
8e53fe8c 5741
f6fabcc6 5742 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
07659514 5743 ctx->conntracked = true;
5fdd80cc 5744 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
07659514 5745 }
f6fabcc6
JP
5746
5747 /* The ct_* fields are only available in the scope of the 'recirc_table'
5748 * call chain. */
5749 flow_clear_conntrack(&ctx->xin->flow);
5750 ctx->conntracked = false;
07659514
JS
5751}
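/* Example (illustrative; ids are made up): an OpenFlow
 * ct(table=4,zone=5) action emits the datapath actions
 * ct(zone=5),recirc(0x3); translation then resumes at table 4 for the
 * recirculated packet, which is the only scope in which its ct_*
 * fields are visible. */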
5752
88a20f8c
EG
5753static void
5754compose_ct_clear_action(struct xlate_ctx *ctx)
5755{
5756 clear_conntrack(ctx);
5757 /* This action originally existed without dpif support. So to preserve
5758 * compatibility, only append it if the dpif supports it. */
5759 if (ctx->xbridge->support.ct_clear) {
5760 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
5761 }
5762}
5763
f839892a
JS
5764static void
5765rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
5766 struct flow *flow,
5767 struct flow_wildcards *wc)
5768{
5769 wc->masks.packet_type = OVS_BE32_MAX;
5770 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
5771 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
5772 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
5773 flow->packet_type = htonl(PT_ETH);
5774 flow->dl_src = eth_addr_zero;
5775 flow->dl_dst = eth_addr_zero;
5776 flow->dl_type = ethertype;
5777 } else {
1fc11c59 5778 /* Error handling: drop packet. */
f839892a 5779 xlate_report_debug(ctx, OFT_ACTION,
1fc11c59
JS
5780 "Dropping packet as encap(ethernet) is not "
5781 "supported for packet type ethernet.");
7873e106 5782 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
f839892a
JS
5783 }
5784}
5785
1fc11c59
JS
5786/* For an MD2 NSH header returns a pointer to an ofpbuf with the encoded
5787 * MD2 TLVs provided as encap properties to the encap operation. This
f59cb331 5788 * will be stored as encap_data in the ctx and copied into the push_nsh
1fc11c59
JS
5789 * action at the next commit. */
5790static struct ofpbuf *
f59cb331
YY
5791rewrite_flow_push_nsh(struct xlate_ctx *ctx,
5792 const struct ofpact_encap *encap,
5793 struct flow *flow,
5794 struct flow_wildcards *wc)
1fc11c59
JS
5795{
5796 ovs_be32 packet_type = flow->packet_type;
5797 const char *ptr = (char *) encap->props;
f59cb331 5798 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
1fc11c59
JS
5799 uint8_t md_type = NSH_M_TYPE1;
5800 uint8_t np = 0;
5801 int i;
5802
5803 /* Scan the optional NSH encap TLV properties, if any. */
5804 for (i = 0; i < encap->n_props; i++) {
5805 struct ofpact_ed_prop *prop_ptr =
5806 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
5807 if (prop_ptr->prop_class == OFPPPC_NSH) {
5808 switch (prop_ptr->type) {
5809 case OFPPPT_PROP_NSH_MDTYPE: {
5810 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
5811 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
5812 prop_ptr);
5813 md_type = prop_md_type->md_type;
5814 break;
5815 }
5816 case OFPPPT_PROP_NSH_TLV: {
5817 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
5818 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
5819 prop_ptr);
5820 struct nsh_md2_tlv *md2_ctx =
5821 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
5822 md2_ctx->md_class = tlv_prop->tlv_class;
5823 md2_ctx->type = tlv_prop->tlv_type;
5824 md2_ctx->length = tlv_prop->tlv_len;
5825 size_t len = ROUND_UP(md2_ctx->length, 4);
5826 size_t padding = len - md2_ctx->length;
5827 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
5828 ofpbuf_put_zeros(buf, padding);
5829 break;
5830 }
5831 default:
5832 /* No other NSH encap properties defined yet. */
5833 break;
5834 }
5835 }
5836 ptr += ROUND_UP(prop_ptr->len, 8);
5837 }
f59cb331 5838 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
1fc11c59
JS
5839 ofpbuf_delete(buf);
5840 buf = NULL;
5841 }
5842
5843 /* Determine the Next Protocol field for the NSH header. */
5844 switch (ntohl(packet_type)) {
5845 case PT_ETH:
5846 np = NSH_P_ETHERNET;
5847 break;
5848 case PT_IPV4:
5849 np = NSH_P_IPV4;
5850 break;
5851 case PT_IPV6:
5852 np = NSH_P_IPV6;
5853 break;
5854 case PT_NSH:
5855 np = NSH_P_NSH;
5856 break;
5857 default:
5858 /* Error handling: drop packet. */
5859 xlate_report_debug(ctx, OFT_ACTION,
5860 "Dropping packet as encap(nsh) is not "
5861 "supported for packet type (%d,0x%x)",
5862 pt_ns(packet_type), pt_ns_type(packet_type));
7873e106 5863 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
1fc11c59
JS
5864 return buf;
5865 }
5866 /* Note that we have matched on packet_type! */
5867 wc->masks.packet_type = OVS_BE32_MAX;
5868
5869 /* Reset all current flow packet headers. */
5870 memset(&flow->dl_dst, 0,
5871 sizeof(struct flow) - offsetof(struct flow, dl_dst));
5872
5873 /* Populate the flow with the new NSH header. */
5874 flow->packet_type = htonl(PT_NSH);
5875 flow->dl_type = htons(ETH_TYPE_NSH);
17553f27
YY
5876 flow->nsh.flags = 0;
5877 flow->nsh.ttl = 63;
1fc11c59 5878 flow->nsh.np = np;
17553f27 5879 flow->nsh.path_hdr = htonl(255);
1fc11c59
JS
5880
5881 if (md_type == NSH_M_TYPE1) {
5882 flow->nsh.mdtype = NSH_M_TYPE1;
f59cb331 5883 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
1fc11c59
JS
5884 if (buf) {
5885 /* Drop any MD2 context TLVs. */
5886 ofpbuf_delete(buf);
5887 buf = NULL;
5888 }
5889 } else if (md_type == NSH_M_TYPE2) {
5890 flow->nsh.mdtype = NSH_M_TYPE2;
5891 }
17553f27 5892 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
1fc11c59
JS
5893
5894 return buf;
5895}
5896
f839892a
JS
5897static void
5898xlate_generic_encap_action(struct xlate_ctx *ctx,
5899 const struct ofpact_encap *encap)
5900{
5901 struct flow *flow = &ctx->xin->flow;
5902 struct flow_wildcards *wc = ctx->wc;
1fc11c59 5903 struct ofpbuf *encap_data = NULL;
f839892a
JS
5904
5905 /* Ensure that any pending actions on the inner packet are applied before
5906 * rewriting the flow */
5907 xlate_commit_actions(ctx);
5908
5909 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
5910 switch (ntohl(encap->new_pkt_type)) {
5911 case PT_ETH:
5912 rewrite_flow_encap_ethernet(ctx, flow, wc);
5913 break;
1fc11c59 5914 case PT_NSH:
f59cb331 5915 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
1fc11c59 5916 break;
f839892a 5917 default:
1fc11c59
JS
5918 /* New packet type was checked during decoding. */
5919 OVS_NOT_REACHED();
f839892a
JS
5920 }
5921
5922 if (!ctx->error) {
5923 /* The actual encap datapath action will be generated at next commit. */
5924 ctx->pending_encap = true;
1fc11c59 5925 ctx->encap_data = encap_data;
f839892a
JS
5926 }
5927}
5928
5929/* Returns true if packet must be recirculated after decapsulation. */
5930static bool
5931xlate_generic_decap_action(struct xlate_ctx *ctx,
5932 const struct ofpact_decap *decap OVS_UNUSED)
5933{
5934 struct flow *flow = &ctx->xin->flow;
5935
5936 /* Ensure that any pending actions on the current packet are applied
5937 * before generating the decap action. */
5938 xlate_commit_actions(ctx);
5939
5940 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
5941 switch (ntohl(flow->packet_type)) {
5942 case PT_ETH:
5943 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
5944 /* Error handling: drop packet. */
5945 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
5946 "decap Ethernet if VLAN is present.");
7873e106 5947 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
f839892a
JS
5948 } else {
5949 /* Just change the packet_type.
5950 * Delay generating pop_eth to the next commit. */
5951 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
5952 ntohs(flow->dl_type)));
5953 ctx->wc->masks.dl_type = OVS_BE16_MAX;
5954 }
5955 return false;
1fc11c59 5956 case PT_NSH:
f59cb331 5957 /* The pop_nsh action is generated at the commit executed as
1fc11c59
JS
5958 * part of freezing the ctx for recirculation. Here we just set
5959 * the new packet type based on the NSH next protocol field. */
5960 switch (flow->nsh.np) {
5961 case NSH_P_ETHERNET:
5962 flow->packet_type = htonl(PT_ETH);
5963 break;
5964 case NSH_P_IPV4:
5965 flow->packet_type = htonl(PT_IPV4);
5966 break;
5967 case NSH_P_IPV6:
5968 flow->packet_type = htonl(PT_IPV6);
5969 break;
5970 case NSH_P_NSH:
5971 flow->packet_type = htonl(PT_NSH);
5972 break;
5973 default:
5974 /* Error handling: drop packet. */
5975 xlate_report_debug(ctx, OFT_ACTION,
5976 "Dropping packet as NSH next protocol %d "
5977 "is not supported", flow->nsh.np);
7873e106 5978 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
1fc11c59
JS
5979 return false;
5980 break;
5981 }
5982 ctx->wc->masks.nsh.np = UINT8_MAX;
5983 /* Trigger recirculation. */
5984 return true;
f839892a 5985 default:
1fc11c59
JS
5986 /* Error handling: drop packet. */
5987 xlate_report_debug(
5988 ctx, OFT_ACTION,
5989 "Dropping packet as the decap() does not support "
5990 "packet type (%d,0x%x)",
5991 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
7873e106 5992 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
f839892a
JS
5993 return false;
5994 }
5995}
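/* Example: decap() on a PT_NSH packet whose nsh.np is NSH_P_ETHERNET
 * sets packet_type to PT_ETH and returns true; the caller then
 * freezes translation, the pop_nsh datapath action is emitted by that
 * commit, and the inner Ethernet packet continues through the
 * pipeline after recirculation. */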
5996
e12ec36b
SH
5997static void
5998recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
5999{
6000 /* No need to recirculate if already exiting. */
6001 if (ctx->exit) {
6002 return;
6003 }
6004
6005 /* Do not consider recirculating unless the packet was previously MPLS. */
6006 if (!ctx->was_mpls) {
6007 return;
6008 }
6009
6010 /* Special case these actions, only recirculating if necessary.
6011 * This avoids the overhead of recirculation in common use-cases.
6012 */
6013 switch (a->type) {
6014
6015 /* Output actions do not require recirculation. */
6016 case OFPACT_OUTPUT:
aaca4fe0 6017 case OFPACT_OUTPUT_TRUNC:
e12ec36b
SH
6018 case OFPACT_ENQUEUE:
6019 case OFPACT_OUTPUT_REG:
6020 /* Set actions that don't touch L3+ fields do not require recirculation. */
6021 case OFPACT_SET_VLAN_VID:
6022 case OFPACT_SET_VLAN_PCP:
6023 case OFPACT_SET_ETH_SRC:
6024 case OFPACT_SET_ETH_DST:
6025 case OFPACT_SET_TUNNEL:
6026 case OFPACT_SET_QUEUE:
6027 /* If actions of a group require recirculation that can be detected
6028 * when translating them. */
6029 case OFPACT_GROUP:
6030 return;
6031
6032 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
6033 case OFPACT_SET_FIELD:
6034 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6035 break;
6036 }
6037 return;
6038
6039 /* For simplicity, recirculate in all other cases. */
6040 case OFPACT_CONTROLLER:
6041 case OFPACT_BUNDLE:
6042 case OFPACT_STRIP_VLAN:
6043 case OFPACT_PUSH_VLAN:
6044 case OFPACT_SET_IPV4_SRC:
6045 case OFPACT_SET_IPV4_DST:
6046 case OFPACT_SET_IP_DSCP:
6047 case OFPACT_SET_IP_ECN:
6048 case OFPACT_SET_IP_TTL:
6049 case OFPACT_SET_L4_SRC_PORT:
6050 case OFPACT_SET_L4_DST_PORT:
6051 case OFPACT_REG_MOVE:
6052 case OFPACT_STACK_PUSH:
6053 case OFPACT_STACK_POP:
6054 case OFPACT_DEC_TTL:
6055 case OFPACT_SET_MPLS_LABEL:
6056 case OFPACT_SET_MPLS_TC:
6057 case OFPACT_SET_MPLS_TTL:
6058 case OFPACT_DEC_MPLS_TTL:
6059 case OFPACT_PUSH_MPLS:
6060 case OFPACT_POP_MPLS:
6061 case OFPACT_POP_QUEUE:
6062 case OFPACT_FIN_TIMEOUT:
6063 case OFPACT_RESUBMIT:
6064 case OFPACT_LEARN:
6065 case OFPACT_CONJUNCTION:
6066 case OFPACT_MULTIPATH:
6067 case OFPACT_NOTE:
6068 case OFPACT_EXIT:
6069 case OFPACT_SAMPLE:
7ae62a67 6070 case OFPACT_CLONE:
f839892a
JS
6071 case OFPACT_ENCAP:
6072 case OFPACT_DECAP:
491e05c2 6073 case OFPACT_DEC_NSH_TTL:
e12ec36b
SH
6074 case OFPACT_UNROLL_XLATE:
6075 case OFPACT_CT:
72fe7578 6076 case OFPACT_CT_CLEAR:
e12ec36b
SH
6077 case OFPACT_NAT:
6078 case OFPACT_DEBUG_RECIRC:
a934a3dd 6079 case OFPACT_DEBUG_SLOW:
e12ec36b
SH
6080 case OFPACT_METER:
6081 case OFPACT_CLEAR_ACTIONS:
6082 case OFPACT_WRITE_ACTIONS:
6083 case OFPACT_WRITE_METADATA:
6084 case OFPACT_GOTO_TABLE:
6085 default:
6086 break;
6087 }
6088
6089 /* Recirculate */
6090 ctx_trigger_freeze(ctx);
6091}
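/* Example: after an MPLS pop to a non-MPLS Ethertype, ctx->was_mpls
 * is true; a subsequent set_field on an L3 field (say the IPv4
 * source) cannot be translated until the datapath has reparsed the
 * packet as IP, so translation freezes here and the remaining actions
 * run after recirculation. */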
6092
2d9b49dd
BP
6093static void
6094xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6095{
6096 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6097 xlate_report_subfield(ctx, &a->dst);
6098}
6099
6100static void
6101xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6102{
6103 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6104 xlate_report_subfield(ctx, &a->subfield);
6105 } else {
6106 xlate_report_error(ctx, "stack underflow");
6107 }
6108}
6109
6110/* Restore translation context data that was stored earlier. */
6111static void
6112xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6113 const struct ofpact_unroll_xlate *a)
6114{
6115 ctx->table_id = a->rule_table_id;
6116 ctx->rule_cookie = a->rule_cookie;
6117 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6118 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6119}
6120
9583bc14
EJ
6121static void
6122do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
feee58b9 6123 struct xlate_ctx *ctx, bool is_last_action)
9583bc14 6124{
49a73e0c 6125 struct flow_wildcards *wc = ctx->wc;
33bf9176 6126 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
6127 const struct ofpact *a;
6128
a36de779 6129 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
53902038 6130 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
a36de779 6131 }
f47ea021
JR
6132 /* dl_type already in the mask, not set below. */
6133
2d9b49dd
BP
6134 if (!ofpacts_len) {
6135 xlate_report(ctx, OFT_ACTION, "drop");
6136 return;
6137 }
6138
9583bc14
EJ
6139 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6140 struct ofpact_controller *controller;
6141 const struct ofpact_metadata *metadata;
b2dd70be
JR
6142 const struct ofpact_set_field *set_field;
6143 const struct mf_field *mf;
feee58b9
AZ
6144 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6145 && ctx->action_set.size;
9583bc14 6146
fff1b9c0
JR
6147 if (ctx->error) {
6148 break;
6149 }
6150
e12ec36b
SH
6151 recirc_for_mpls(a, ctx);
6152
e672ff9b
JR
6153 if (ctx->exit) {
6154 /* Check if need to store the remaining actions for later
6155 * execution. */
1d361a81
BP
6156 if (ctx->freezing) {
6157 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
e672ff9b
JR
6158 ctx);
6159 }
6160 break;
7bbdd84f
SH
6161 }
6162
2d9b49dd
BP
6163 if (OVS_UNLIKELY(ctx->xin->trace)) {
6164 struct ds s = DS_EMPTY_INITIALIZER;
50f96b10 6165 ofpacts_format(a, OFPACT_ALIGN(a->len), NULL, &s);
2d9b49dd
BP
6166 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6167 ds_destroy(&s);
6168 }
6169
9583bc14
EJ
6170 switch (a->type) {
6171 case OFPACT_OUTPUT:
6172 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
8b496c72
BP
6173 ofpact_get_OUTPUT(a)->max_len, true, last,
6174 false);
9583bc14
EJ
6175 break;
6176
7395c052 6177 case OFPACT_GROUP:
feee58b9 6178 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
1d741d6d 6179 /* Group could not be found. */
db88b35c
JR
6180
6181 /* XXX: Terminates action list translation, but does not
6182 * terminate the pipeline. */
f4fb341b
SH
6183 return;
6184 }
7395c052
NZ
6185 break;
6186
9583bc14
EJ
6187 case OFPACT_CONTROLLER:
6188 controller = ofpact_get_CONTROLLER(a);
77ab5fd2
BP
6189 if (controller->pause) {
6190 ctx->pause = controller;
77ab5fd2
BP
6191 ctx_trigger_freeze(ctx);
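                /* Skip the pause action itself so that the frozen
                 * continuation resumes with the action that follows it. */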
6192 a = ofpact_next(a);
6193 } else {
d39ec23d
JP
6194 xlate_controller_action(ctx, controller->max_len,
6195 controller->reason,
6196 controller->controller_id,
6197 controller->userdata,
6198 controller->userdata_len);
77ab5fd2 6199 }
9583bc14
EJ
6200 break;
6201
6202 case OFPACT_ENQUEUE:
16194afd
DDP
6203 memset(&wc->masks.skb_priority, 0xff,
6204 sizeof wc->masks.skb_priority);
feee58b9 6205 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last);
9583bc14
EJ
6206 break;
6207
6208 case OFPACT_SET_VLAN_VID:
f0fb825a
EG
6209 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6210 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6211 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
f0fb825a
EG
6212 if (!flow->vlans[0].tpid) {
6213 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6214 }
6215 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6216 flow->vlans[0].tci |=
6217 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6218 htons(VLAN_CFI));
ca287d20 6219 }
9583bc14
EJ
6220 break;
6221
6222 case OFPACT_SET_VLAN_PCP:
f0fb825a
EG
6223 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6224 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6225 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
f0fb825a
EG
6226 if (!flow->vlans[0].tpid) {
6227 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6228 }
6229 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6230 flow->vlans[0].tci |=
6231 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6232 << VLAN_PCP_SHIFT) | VLAN_CFI);
ca287d20 6233 }
9583bc14
EJ
6234 break;
6235
6236 case OFPACT_STRIP_VLAN:
f0fb825a 6237 flow_pop_vlan(flow, wc);
9583bc14
EJ
6238 break;
6239
6240 case OFPACT_PUSH_VLAN:
f0fb825a
EG
6241 flow_push_vlan_uninit(flow, wc);
6242 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6243 flow->vlans[0].tci = htons(VLAN_CFI);
9583bc14
EJ
6244 break;
6245
6246 case OFPACT_SET_ETH_SRC:
74ff3298
JR
6247 WC_MASK_FIELD(wc, dl_src);
6248 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
9583bc14
EJ
6249 break;
6250
6251 case OFPACT_SET_ETH_DST:
74ff3298
JR
6252 WC_MASK_FIELD(wc, dl_dst);
6253 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
9583bc14
EJ
6254 break;
6255
6256 case OFPACT_SET_IPV4_SRC:
33bf9176 6257 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6258 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 6259 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
6260 }
6261 break;
6262
6263 case OFPACT_SET_IPV4_DST:
33bf9176 6264 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6265 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 6266 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
6267 }
6268 break;
6269
04f01c24
BP
6270 case OFPACT_SET_IP_DSCP:
6271 if (is_ip_any(flow)) {
f47ea021 6272 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 6273 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 6274 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
9583bc14
EJ
6275 }
6276 break;
6277
ff14eb7a
JR
6278 case OFPACT_SET_IP_ECN:
6279 if (is_ip_any(flow)) {
6280 wc->masks.nw_tos |= IP_ECN_MASK;
6281 flow->nw_tos &= ~IP_ECN_MASK;
6282 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6283 }
6284 break;
6285
0c20dbe4
JR
6286 case OFPACT_SET_IP_TTL:
6287 if (is_ip_any(flow)) {
6288 wc->masks.nw_ttl = 0xff;
6289 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6290 }
6291 break;
6292
9583bc14 6293 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 6294 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6295 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6296 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 6297 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
6298 }
6299 break;
6300
6301 case OFPACT_SET_L4_DST_PORT:
b8778a0d 6302 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6303 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6304 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 6305 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
6306 }
6307 break;
6308
6309 case OFPACT_RESUBMIT:
8bf009bf
JR
6310 /* Freezing complicates resubmit. Some action in the flow
6311 * entry found by resubmit might trigger freezing. If that
6312 * happens, then we do not want to execute the resubmit again
6313 * during thawing, so we skip back to the head of the loop
6314 * to avoid that, only adding any actions that follow the resubmit
6315 * to the frozen actions.
6b1c5734 6316 */
feee58b9 6317 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6b1c5734 6318 continue;
9583bc14
EJ
6319
6320 case OFPACT_SET_TUNNEL:
33bf9176 6321 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
6322 break;
6323
6324 case OFPACT_SET_QUEUE:
16194afd
DDP
6325 memset(&wc->masks.skb_priority, 0xff,
6326 sizeof wc->masks.skb_priority);
9583bc14
EJ
6327 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6328 break;
6329
6330 case OFPACT_POP_QUEUE:
16194afd
DDP
6331 memset(&wc->masks.skb_priority, 0xff,
6332 sizeof wc->masks.skb_priority);
2d9b49dd
BP
6333 if (flow->skb_priority != ctx->orig_skb_priority) {
6334 flow->skb_priority = ctx->orig_skb_priority;
6335 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6336 flow->skb_priority);
6337 }
9583bc14
EJ
6338 break;
6339
6340 case OFPACT_REG_MOVE:
2d9b49dd 6341 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
9583bc14
EJ
6342 break;
6343
b2dd70be
JR
6344 case OFPACT_SET_FIELD:
6345 set_field = ofpact_get_SET_FIELD(a);
6346 mf = set_field->field;
b2dd70be 6347
aff49b8c
JR
6348 /* Set the field only if the packet actually has it. */
6349 if (mf_are_prereqs_ok(mf, flow, wc)) {
128684a6
JR
6350 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6351 mf_set_flow_value_masked(mf, set_field->value,
6352 ofpact_set_field_mask(set_field),
6353 flow);
2d9b49dd
BP
6354 } else {
6355 xlate_report(ctx, OFT_WARN,
6356 "unmet prerequisites for %s, set_field ignored",
6357 mf->name);
6358
b8778a0d 6359 }
b2dd70be
JR
6360 break;
6361
9583bc14 6362 case OFPACT_STACK_PUSH:
33bf9176
BP
6363 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6364 &ctx->stack);
9583bc14
EJ
6365 break;
6366
6367 case OFPACT_STACK_POP:
2d9b49dd 6368 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
9583bc14
EJ
6369 break;
6370
6371 case OFPACT_PUSH_MPLS:
8bfd0fda 6372 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
9583bc14
EJ
6373 break;
6374
6375 case OFPACT_POP_MPLS:
8bfd0fda 6376 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
9583bc14
EJ
6377 break;
6378
097d4939 6379 case OFPACT_SET_MPLS_LABEL:
8bfd0fda
BP
6380 compose_set_mpls_label_action(
6381 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
1d741d6d 6382 break;
097d4939
JR
6383
6384 case OFPACT_SET_MPLS_TC:
8bfd0fda 6385 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
097d4939
JR
6386 break;
6387
9583bc14 6388 case OFPACT_SET_MPLS_TTL:
8bfd0fda 6389 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
9583bc14
EJ
6390 break;
6391
6392 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 6393 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 6394 return;
9583bc14
EJ
6395 }
6396 break;
6397
491e05c2
YY
6398 case OFPACT_DEC_NSH_TTL:
6399 if (compose_dec_nsh_ttl_action(ctx)) {
6400 return;
6401 }
6402 break;
6403
9583bc14 6404 case OFPACT_DEC_TTL:
f74e7df7 6405 wc->masks.nw_ttl = 0xff;
9583bc14 6406 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 6407 return;
9583bc14
EJ
6408 }
6409 break;
6410
6411 case OFPACT_NOTE:
6412 /* Nothing to do. */
6413 break;
6414
6415 case OFPACT_MULTIPATH:
33bf9176 6416 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
2d9b49dd 6417 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
9583bc14
EJ
6418 break;
6419
6420 case OFPACT_BUNDLE:
feee58b9 6421 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last);
9583bc14
EJ
6422 break;
6423
6424 case OFPACT_OUTPUT_REG:
feee58b9 6425 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last);
9583bc14
EJ
6426 break;
6427
aaca4fe0
WT
6428 case OFPACT_OUTPUT_TRUNC:
6429 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
feee58b9 6430 ofpact_get_OUTPUT_TRUNC(a)->max_len, last);
aaca4fe0
WT
6431 break;
6432
9583bc14
EJ
6433 case OFPACT_LEARN:
6434 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6435 break;
6436
2d9b49dd 6437 case OFPACT_CONJUNCTION:
afc3987b
BP
6438 /* A flow with a "conjunction" action represents part of a special
6439 * kind of "set membership match". Such a flow should not actually
6440 * get executed, but it could via, say, a "packet-out", even though
6441 * that wouldn't be useful. Log it to help debugging. */
2d9b49dd 6442 xlate_report_error(ctx, "executing no-op conjunction action");
18080541
BP
6443 break;
6444
9583bc14
EJ
6445 case OFPACT_EXIT:
6446 ctx->exit = true;
6447 break;
6448
2d9b49dd
BP
6449 case OFPACT_UNROLL_XLATE:
6450 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
e672ff9b 6451 break;
2d9b49dd 6452
9583bc14 6453 case OFPACT_FIN_TIMEOUT:
33bf9176 6454 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
9583bc14
EJ
6455 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6456 break;
6457
6458 case OFPACT_CLEAR_ACTIONS:
2d9b49dd 6459 xlate_report_action_set(ctx, "was");
7fdb60a7 6460 ofpbuf_clear(&ctx->action_set);
c61f3870
BP
6461 ctx->xin->flow.actset_output = OFPP_UNSET;
6462 ctx->action_set_has_group = false;
7fdb60a7
SH
6463 break;
6464
6465 case OFPACT_WRITE_ACTIONS:
7e7e8dbb 6466 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
2d9b49dd 6467 xlate_report_action_set(ctx, "is");
9583bc14
EJ
6468 break;
6469
6470 case OFPACT_WRITE_METADATA:
6471 metadata = ofpact_get_WRITE_METADATA(a);
33bf9176
BP
6472 flow->metadata &= ~metadata->mask;
6473 flow->metadata |= metadata->metadata & metadata->mask;
9583bc14
EJ
6474 break;
6475
638a19b0 6476 case OFPACT_METER:
076caa2f 6477 xlate_meter_action(ctx, ofpact_get_METER(a));
638a19b0
JR
6478 break;
6479
9583bc14 6480 case OFPACT_GOTO_TABLE: {
9583bc14 6481 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14 6482
9167fc1a
JR
6483 ovs_assert(ctx->table_id < ogt->table_id);
6484
4468099e 6485 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
96c3a6e5
AZ
6486 ogt->table_id, true, true, false, last,
6487 do_xlate_actions);
9583bc14
EJ
6488 break;
6489 }
6490
6491 case OFPACT_SAMPLE:
6492 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6493 break;
d4abaff5 6494
7ae62a67 6495 case OFPACT_CLONE:
feee58b9 6496 compose_clone(ctx, ofpact_get_CLONE(a), last);
7ae62a67
WT
6497 break;
6498
f839892a
JS
6499 case OFPACT_ENCAP:
6500 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6501 break;
6502
6503 case OFPACT_DECAP: {
6504 bool recirc_needed =
6505 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6506 if (!ctx->error && recirc_needed) {
6507 /* Recirculate for parsing of inner packet. */
6508 ctx_trigger_freeze(ctx);
6509 /* Then continue with next action. */
6510 a = ofpact_next(a);
6511 }
6512 break;
6513 }
6514
07659514 6515 case OFPACT_CT:
feee58b9 6516 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
07659514
JS
6517 break;
6518
72fe7578 6519 case OFPACT_CT_CLEAR:
88a20f8c 6520 compose_ct_clear_action(ctx);
72fe7578
BP
6521 break;
6522
9ac0aada
JR
6523 case OFPACT_NAT:
6524 /* This will be processed by compose_conntrack_action(). */
6525 ctx->ct_nat_action = ofpact_get_NAT(a);
6526 break;
6527
d4abaff5 6528 case OFPACT_DEBUG_RECIRC:
1d361a81 6529 ctx_trigger_freeze(ctx);
d4abaff5
BP
6530 a = ofpact_next(a);
6531 break;
a934a3dd
JP
6532
6533 case OFPACT_DEBUG_SLOW:
6534 ctx->xout->slow |= SLOW_ACTION;
6535 break;
9583bc14 6536 }
1d741d6d
JR
6537
6538 /* Check if we need to store this and the remaining actions for later
6539 * execution. */
1d361a81
BP
6540 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
6541 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
1d741d6d
JR
6542 break;
6543 }
9583bc14 6544 }
9583bc14
EJ
6545}
6546
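/* Sketch of the "last action" computation at the top of the loop above,
 * using plain indices instead of struct ofpact pointers (stand-in names,
 * not OVS API): an action is effectively last only when the caller says
 * the whole list is last, the action ends that list, and no deferred
 * action set remains to be executed afterwards. */
#include <stdbool.h>
#include <stddef.h>

static bool
action_is_last(bool is_last_action, size_t i, size_t n_actions,
               size_t action_set_size)
{
    return is_last_action && i + 1 == n_actions && !action_set_size;
}
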
6547void
6548xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
1f4a8933
JR
6549 ovs_version_t version, const struct flow *flow,
6550 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
1520ef4f
BP
6551 const struct dp_packet *packet, struct flow_wildcards *wc,
6552 struct ofpbuf *odp_actions)
9583bc14
EJ
6553{
6554 xin->ofproto = ofproto;
1f4a8933 6555 xin->tables_version = version;
9583bc14 6556 xin->flow = *flow;
8d8ab6c2 6557 xin->upcall_flow = flow;
cc377352 6558 xin->flow.in_port.ofp_port = in_port;
c61f3870 6559 xin->flow.actset_output = OFPP_UNSET;
9583bc14 6560 xin->packet = packet;
df70a773 6561 xin->allow_side_effects = packet != NULL;
9583bc14 6562 xin->rule = rule;
b256dc52 6563 xin->xcache = NULL;
9583bc14
EJ
6564 xin->ofpacts = NULL;
6565 xin->ofpacts_len = 0;
6566 xin->tcp_flags = tcp_flags;
2d9b49dd 6567 xin->trace = NULL;
9583bc14 6568 xin->resubmit_stats = NULL;
790c5d26 6569 xin->depth = 0;
cdd42eda 6570 xin->resubmits = 0;
49a73e0c 6571 xin->wc = wc;
1520ef4f 6572 xin->odp_actions = odp_actions;
331c07ac 6573 xin->in_packet_out = false;
e6bc8e74 6574 xin->recirc_queue = NULL;
43e73536 6575 xin->xport_uuid = UUID_ZERO;
e672ff9b
JR
6576
6577 /* Do recirc lookup. */
1d361a81 6578 xin->frozen_state = NULL;
29b1ea3f
BP
6579 if (flow->recirc_id) {
6580 const struct recirc_id_node *node
6581 = recirc_id_node_find(flow->recirc_id);
6582 if (node) {
1d361a81 6583 xin->frozen_state = &node->state;
29b1ea3f
BP
6584 }
6585 }
9583bc14
EJ
6586}
6587
6588void
6589xlate_out_uninit(struct xlate_out *xout)
6590{
e672ff9b 6591 if (xout) {
fbf5d6ec 6592 recirc_refs_unref(&xout->recircs);
9583bc14
EJ
6593 }
6594}
9583bc14 6595\f
55954f6e
EJ
6596static struct skb_priority_to_dscp *
6597get_skb_priority(const struct xport *xport, uint32_t skb_priority)
6598{
6599 struct skb_priority_to_dscp *pdscp;
6600 uint32_t hash;
6601
6602 hash = hash_int(skb_priority, 0);
6603 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
6604 if (pdscp->skb_priority == skb_priority) {
6605 return pdscp;
6606 }
6607 }
6608 return NULL;
6609}
6610
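/* Self-contained sketch (stand-in types, not the OVS hmap API) of the
 * bucketed lookup in get_skb_priority() above: hash the key, scan only
 * the matching bucket, and compare keys to cope with hash collisions. */
#include <stddef.h>
#include <stdint.h>

struct prio_node { uint32_t key; uint8_t dscp; struct prio_node *next; };

static struct prio_node *
prio_find(struct prio_node *buckets[], size_t n_buckets, uint32_t key)
{
    uint32_t hash = key * 0x9e3779b1;      /* Any reasonable mix function. */
    struct prio_node *node;

    for (node = buckets[hash % n_buckets]; node; node = node->next) {
        if (node->key == key) {            /* A hash match is not enough. */
            return node;
        }
    }
    return NULL;                           /* Same contract as above. */
}
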
6611static bool
6612dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
6613 uint8_t *dscp)
6614{
6615 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
6616 *dscp = pdscp ? pdscp->dscp : 0;
6617 return pdscp != NULL;
6618}
6619
16194afd
DDP
6620static size_t
6621count_skb_priorities(const struct xport *xport)
6622{
6623 return hmap_count(&xport->skb_priorities);
6624}
6625
55954f6e
EJ
6626static void
6627clear_skb_priorities(struct xport *xport)
6628{
4ec3d7c7 6629 struct skb_priority_to_dscp *pdscp;
55954f6e 6630
4ec3d7c7 6631 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
55954f6e
EJ
6632 free(pdscp);
6633 }
6634}
6635
ce4a6b76
BP
6636static bool
6637actions_output_to_local_port(const struct xlate_ctx *ctx)
6638{
46c88433 6639 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
ce4a6b76
BP
6640 const struct nlattr *a;
6641 unsigned int left;
6642
1520ef4f
BP
6643 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
6644 ctx->odp_actions->size) {
ce4a6b76
BP
6645 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
6646 && nl_attr_get_odp_port(a) == local_odp_port) {
6647 return true;
6648 }
6649 }
6650 return false;
6651}
9583bc14 6652
5e2a6702 6653#if defined(__linux__)
7d031d7e
BP
6654/* Returns the maximum number of packets that the Linux kernel is willing to
6655 * queue up internally to certain kinds of software-implemented ports, or the
6656 * default (and rarely modified) value if it cannot be determined. */
6657static int
6658netdev_max_backlog(void)
6659{
6660 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6661 static int max_backlog = 1000; /* The normal default value. */
6662
6663 if (ovsthread_once_start(&once)) {
6664 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
6665 FILE *stream;
6666 int n;
6667
6668 stream = fopen(filename, "r");
6669 if (!stream) {
120c348f 6670 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7d031d7e
BP
6671 } else {
6672 if (fscanf(stream, "%d", &n) != 1) {
6673 VLOG_WARN("%s: read error", filename);
6674 } else if (n <= 100) {
6675 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
6676 } else {
6677 max_backlog = n;
6678 }
6679 fclose(stream);
6680 }
6681 ovsthread_once_done(&once);
6682
6683 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
6684 }
6685
6686 return max_backlog;
6687}
6688
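/* Sketch of the read-once-with-default pattern above, using plain POSIX
 * pthread_once() in place of OVS's ovsthread_once (an assumption made for
 * the sake of a self-contained example; the path, the 1000 default, and
 * the >100 sanity check are taken from netdev_max_backlog() itself). */
#include <pthread.h>
#include <stdio.h>

static int cached_backlog = 1000;             /* Default if unreadable. */
static pthread_once_t backlog_once = PTHREAD_ONCE_INIT;

static void
read_backlog_once(void)
{
    FILE *stream = fopen("/proc/sys/net/core/netdev_max_backlog", "r");
    if (stream) {
        int n;
        if (fscanf(stream, "%d", &n) == 1 && n > 100) {
            cached_backlog = n;               /* Ignore implausible values. */
        }
        fclose(stream);
    }
}

static int
max_backlog(void)
{
    pthread_once(&backlog_once, read_backlog_once);
    return cached_backlog;
}
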
6689/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
6690 * 'odp_actions'. */
6691static int
6692count_output_actions(const struct ofpbuf *odp_actions)
6693{
6694 const struct nlattr *a;
6695 size_t left;
6696 int n = 0;
6697
6fd6ed71 6698 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7d031d7e
BP
6699 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
6700 n++;
6701 }
6702 }
6703 return n;
6704}
5e2a6702 6705#endif /* defined(__linux__) */
7d031d7e
BP
6706
6707/* Returns true if 'odp_actions' contains more output actions than the datapath
6708 * can reliably handle in one go. On Linux, this is the value of the
6709 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
6710 * packets that the kernel is willing to queue up for processing while the
6711 * datapath is processing a set of actions. */
6712static bool
5e2a6702 6713too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7d031d7e
BP
6714{
6715#ifdef __linux__
6fd6ed71 6716 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7d031d7e
BP
6717 && count_output_actions(odp_actions) > netdev_max_backlog());
6718#else
6719 /* OSes other than Linux might have similar limits, but we don't know how
6720 * to determine them. */
6721 return false;
6722#endif
6723}
6724
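/* Sketch of the two-stage check above with stand-in netlink-style TLVs
 * (struct tlv, TYPE_OUTPUT, and the 8-byte minimum are assumptions, not
 * the real OVS definitions): a cheap size-based upper bound first, then
 * an exact count only when that bound is exceeded. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct tlv { uint16_t len; uint16_t type; };  /* len includes this header. */
#define TLV_ALIGN(LEN) (((LEN) + 3) & ~3)
#define TYPE_OUTPUT 1

static int
count_outputs(const uint8_t *buf, size_t size)
{
    int n = 0;
    size_t ofs = 0;

    while (ofs + sizeof(struct tlv) <= size) {
        const struct tlv *a = (const struct tlv *) (buf + ofs);
        if (a->len < sizeof *a) {
            break;                            /* Malformed: stop walking. */
        }
        n += a->type == TYPE_OUTPUT;
        ofs += TLV_ALIGN(a->len);
    }
    return n;
}

static bool
too_many(const uint8_t *buf, size_t size, int max)
{
    /* Each attribute takes at least 8 bytes, so a buffer of 'size' bytes
     * cannot hold more than size / 8 outputs; count exactly only when the
     * cheap bound says the limit might be exceeded. */
    return size / 8 > (size_t) max && count_outputs(buf, size) > max;
}
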
234c3da9
BP
6725static void
6726xlate_wc_init(struct xlate_ctx *ctx)
6727{
6728 flow_wildcards_init_catchall(ctx->wc);
6729
6730 /* Some fields we consider to always be examined. */
3d4b2e6e 6731 WC_MASK_FIELD(ctx->wc, packet_type);
5e2e998a 6732 WC_MASK_FIELD(ctx->wc, in_port);
3d4b2e6e
JS
6733 if (is_ethernet(&ctx->xin->flow, NULL)) {
6734 WC_MASK_FIELD(ctx->wc, dl_type);
6735 }
234c3da9 6736 if (is_ip_any(&ctx->xin->flow)) {
5e2e998a 6737 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
234c3da9
BP
6738 }
6739
6740 if (ctx->xbridge->support.odp.recirc) {
6741 /* Always exactly match recirc_id when datapath supports
6742 * recirculation. */
5e2e998a 6743 WC_MASK_FIELD(ctx->wc, recirc_id);
234c3da9
BP
6744 }
6745
6746 if (ctx->xbridge->netflow) {
6747 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
6748 }
6749
6750 tnl_wc_init(&ctx->xin->flow, ctx->wc);
6751}
6752
6753static void
6754xlate_wc_finish(struct xlate_ctx *ctx)
6755{
f0fb825a
EG
6756 int i;
6757
234c3da9
BP
6758 /* Clear the metadata and register wildcard masks, because we won't
6759 * use non-header fields as part of the cache. */
6760 flow_wildcards_clear_non_packet_fields(ctx->wc);
6761
f839892a 6762 /* Wildcard ethernet fields if the original packet type was not
beb75a40
JS
6763 * Ethernet. */
6764 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
6765 ctx->wc->masks.dl_dst = eth_addr_zero;
6766 ctx->wc->masks.dl_src = eth_addr_zero;
3d4b2e6e 6767 ctx->wc->masks.dl_type = 0;
beb75a40
JS
6768 }
6769
234c3da9
BP
6770 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
6771 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
6772 * represent these fields. The datapath interface, on the other hand,
6773 * represents them with just 8 bits each. This means that if the high
6774 * 8 bits of the masks for these fields somehow become set, then they
6775 * will get chopped off by a round trip through the datapath, and
6776 * revalidation will spot that as an inconsistency and delete the flow.
6777 * Avoid the problem here by making sure that only the low 8 bits of
6778 * either field can be unwildcarded for ICMP.
6779 */
a75636c8 6780 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
234c3da9
BP
6781 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
6782 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
6783 }
6784 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
f0fb825a
EG
6785 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
6786 if (ctx->wc->masks.vlans[i].tci) {
6787 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
6788 }
234c3da9 6789 }
4a7ab326
DDP
6790
6791 /* The classifier might return masks that match on tp_src and tp_dst even
6792 * for later fragments. This happens because there might be flows that
6793 * match on tp_src or tp_dst without matching on the frag bits, because
6794 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
6795 * datapath flows and since tp_src and tp_dst are always going to be 0,
6796 * wildcard the fields here. */
6797 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
6798 ctx->wc->masks.tp_src = 0;
6799 ctx->wc->masks.tp_dst = 0;
6800 }
234c3da9
BP
6801}
6802
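/* Runnable sketch of the ICMP mask clamp above: ICMP "type" and "code" are
 * 8-bit on the wire, so only the low byte of the 16-bit tp_src and tp_dst
 * masks may remain set, in either byte order. */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint16_t mask = htons(0xffff);     /* Over-wide mask from the lookup. */

    mask &= htons(UINT8_MAX);          /* The clamp used above. */
    assert(ntohs(mask) == 0x00ff);     /* Survives a datapath round trip. */
    return 0;
}
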
e672ff9b
JR
6803/* Translates the flow, actions, or rule in 'xin' into datapath actions in
6804 * 'xout'.
56450a41 6805 * The caller must take responsibility for eventually freeing 'xout', with
fff1b9c0
JR
6806 * xlate_out_uninit().
6807 * Returns 'XLATE_OK' if translation was successful. In case of an error an
6808 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
6809 * so that most callers may ignore the return value and transparently install a
6810 * drop flow when the translation fails. */
6811enum xlate_error
84f0f298 6812xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
9583bc14 6813{
e467ea42
BP
6814 *xout = (struct xlate_out) {
6815 .slow = 0,
fbf5d6ec 6816 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
e467ea42
BP
6817 };
6818
84f0f298 6819 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
bb00fdef
BP
6820 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
6821 if (!xbridge) {
fff1b9c0 6822 return XLATE_BRIDGE_NOT_FOUND;
bb00fdef
BP
6823 }
6824
33bf9176
BP
6825 struct flow *flow = &xin->flow;
6826
84cf3c1f 6827 uint8_t stack_stub[1024];
bb00fdef 6828 uint64_t action_set_stub[1024 / 8];
1d361a81 6829 uint64_t frozen_actions_stub[1024 / 8];
1520ef4f
BP
6830 uint64_t actions_stub[256 / 8];
6831 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
bb00fdef
BP
6832 struct xlate_ctx ctx = {
6833 .xin = xin,
6834 .xout = xout,
6835 .base_flow = *flow,
c2b878e0 6836 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
68f515ca 6837 .xcfg = xcfg,
bb00fdef
BP
6838 .xbridge = xbridge,
6839 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
6840 .rule = xin->rule,
c0e638aa
BP
6841 .wc = (xin->wc
6842 ? xin->wc
f36efd90 6843 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
1520ef4f 6844 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
bb00fdef 6845
790c5d26 6846 .depth = xin->depth,
cdd42eda 6847 .resubmits = xin->resubmits,
bb00fdef
BP
6848 .in_group = false,
6849 .in_action_set = false,
331c07ac 6850 .in_packet_out = xin->in_packet_out,
f839892a 6851 .pending_encap = false,
1fc11c59 6852 .encap_data = NULL,
bb00fdef
BP
6853
6854 .table_id = 0,
6855 .rule_cookie = OVS_BE64_MAX,
6856 .orig_skb_priority = flow->skb_priority,
6857 .sflow_n_outputs = 0,
6858 .sflow_odp_port = 0,
2031ef97 6859 .nf_output_iface = NF_OUT_DROP,
bb00fdef 6860 .exit = false,
fff1b9c0 6861 .error = XLATE_OK,
3d6151f3 6862 .mirrors = 0,
bb00fdef 6863
1d361a81 6864 .freezing = false,
53cc166a 6865 .recirc_update_dp_hash = false,
1d361a81 6866 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
77ab5fd2 6867 .pause = NULL,
bb00fdef 6868
e12ec36b 6869 .was_mpls = false,
07659514 6870 .conntracked = false,
bb00fdef 6871
9ac0aada
JR
6872 .ct_nat_action = NULL,
6873
bb00fdef
BP
6874 .action_set_has_group = false,
6875 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
6876 };
865ca6cf
BP
6877
6878 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
42deb67d
PS
6879 * the packet as the datapath will treat it for output actions. Our
6880 * datapath doesn't retain tunneling information without us re-setting
6881 * it, so clear the tunnel data.
865ca6cf 6882 */
42deb67d 6883
bb00fdef 6884 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
865ca6cf 6885
1520ef4f 6886 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
c0e638aa 6887 xlate_wc_init(&ctx);
bb00fdef 6888
46c88433 6889 COVERAGE_INC(xlate_actions);
9583bc14 6890
2d9b49dd
BP
6891 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
6892 xbridge->name);
1d361a81
BP
6893 if (xin->frozen_state) {
6894 const struct frozen_state *state = xin->frozen_state;
e672ff9b 6895
2d9b49dd
BP
6896 struct ovs_list *old_trace = xin->trace;
6897 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
d6bef3cc 6898
e672ff9b 6899 if (xin->ofpacts_len > 0 || ctx.rule) {
2d9b49dd
BP
6900 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
6901 xin->ofpacts_len ? "actions" : "rule");
fff1b9c0 6902 ctx.error = XLATE_RECIRCULATION_CONFLICT;
1520ef4f 6903 goto exit;
e672ff9b
JR
6904 }
6905
6906 /* Set the bridge for post-recirculation processing if needed. */
07a3cd5c 6907 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
2082425c 6908 const struct xbridge *new_bridge
290835f9 6909 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
e672ff9b
JR
6910
6911 if (OVS_UNLIKELY(!new_bridge)) {
6912 /* Drop the packet if the bridge cannot be found. */
2d9b49dd 6913 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
fff1b9c0 6914 ctx.error = XLATE_BRIDGE_NOT_FOUND;
2d9b49dd 6915 xin->trace = old_trace;
1520ef4f 6916 goto exit;
e672ff9b
JR
6917 }
6918 ctx.xbridge = new_bridge;
1f4a8933
JR
6919 /* The bridge is now known so obtain its table version. */
6920 ctx.xin->tables_version
6921 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
e672ff9b
JR
6922 }
6923
1d361a81
BP
6924 /* Set the thawed table id. Note: A table lookup is done only if there
6925 * are no frozen actions. */
2082425c 6926 ctx.table_id = state->table_id;
2d9b49dd
BP
6927 xlate_report(&ctx, OFT_THAW,
6928 "Resuming from table %"PRIu8, ctx.table_id);
e672ff9b 6929
40b0fbd3 6930 ctx.conntracked = state->conntracked;
07659514 6931 if (!state->conntracked) {
72fe7578 6932 clear_conntrack(&ctx);
07659514
JS
6933 }
6934
e672ff9b 6935 /* Restore pipeline metadata. May change flow's in_port and other
1d361a81
BP
6936 * metadata to the values that existed when freezing was triggered. */
6937 frozen_metadata_to_flow(&state->metadata, flow);
e672ff9b
JR
6938
6939 /* Restore stack, if any. */
2082425c 6940 if (state->stack) {
84cf3c1f 6941 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
e672ff9b
JR
6942 }
6943
29bae541
BP
6944 /* Restore mirror state. */
6945 ctx.mirrors = state->mirrors;
6946
e672ff9b 6947 /* Restore action set, if any. */
2082425c 6948 if (state->action_set_len) {
2d9b49dd 6949 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
417509fa 6950 state->action_set, state->action_set_len);
d6bef3cc 6951
7e7e8dbb
BP
6952 flow->actset_output = OFPP_UNSET;
6953 xlate_write_actions__(&ctx, state->action_set,
6954 state->action_set_len);
e672ff9b
JR
6955 }
6956
1d361a81
BP
6957 /* Restore frozen actions. If there are no actions, processing will
6958 * start with a lookup in the table set above. */
417509fa
BP
6959 xin->ofpacts = state->ofpacts;
6960 xin->ofpacts_len = state->ofpacts_len;
6961 if (state->ofpacts_len) {
2d9b49dd 6962 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
d6bef3cc 6963 xin->ofpacts, xin->ofpacts_len);
e672ff9b 6964 }
e672ff9b 6965
2d9b49dd
BP
6966 xin->trace = old_trace;
6967 } else if (OVS_UNLIKELY(flow->recirc_id)) {
6968 xlate_report_error(&ctx,
6969 "Recirculation context not found for ID %"PRIx32,
6970 flow->recirc_id);
fff1b9c0 6971 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
1520ef4f 6972 goto exit;
e672ff9b 6973 }
9583bc14 6974
8d8ab6c2
JG
6975 /* Tunnel metadata in udpif format must be normalized before translation. */
6976 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
5b09d9f7
MS
6977 const struct tun_table *tun_tab = ofproto_get_tun_tab(
6978 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
6979 int err;
6980
6981 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
6982 &xin->upcall_flow->tunnel,
6983 &flow->tunnel);
6984 if (err) {
2d9b49dd 6985 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
8d8ab6c2
JG
6986 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
6987 goto exit;
6988 }
6989 } else if (!flow->tunnel.metadata.tab) {
6990 /* If the original flow did not come in on a tunnel, then it won't have
6991 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
6992 * table in case we generate tunnel actions. */
5b09d9f7
MS
6993 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
6994 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
6995 }
6996 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
6997
beb75a40
JS
6998 /* Get the proximate input port of the packet. (If xin->frozen_state,
6999 * flow->in_port is the ultimate input port of the packet.) */
7000 struct xport *in_port = get_ofp_port(xbridge,
7001 ctx.base_flow.in_port.ofp_port);
43e73536
ZB
7002 if (in_port && !in_port->peer) {
7003 ctx.xin->xport_uuid = in_port->uuid;
7004 }
beb75a40 7005
875ab130
BP
7006 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7007 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
beb75a40
JS
7008 /* Add dummy Ethernet header to non-L2 packet if it's coming from a
7009 * L3 port. So all packets will be L2 packets for lookup.
7010 * The dl_type has already been set from the packet_type. */
7011 flow->packet_type = htonl(PT_ETH);
7012 flow->dl_src = eth_addr_zero;
7013 flow->dl_dst = eth_addr_zero;
f839892a 7014 ctx.pending_encap = true;
beb75a40
JS
7015 }
7016
10c44245 7017 if (!xin->ofpacts && !ctx.rule) {
b2e89cc9 7018 ctx.rule = rule_dpif_lookup_from_table(
1f4a8933 7019 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
1e1e1d19 7020 ctx.xin->resubmit_stats, &ctx.table_id,
a027899e 7021 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
10c44245 7022 if (ctx.xin->resubmit_stats) {
b2e89cc9 7023 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
10c44245 7024 }
b256dc52
JS
7025 if (ctx.xin->xcache) {
7026 struct xc_entry *entry;
7027
7028 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
901a517e 7029 entry->rule = ctx.rule;
07a3cd5c 7030 ofproto_rule_ref(&ctx.rule->up);
b256dc52 7031 }
a8c31348 7032
2d9b49dd 7033 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
10c44245 7034 }
10c44245 7035
1d361a81
BP
7036 /* Tunnel stats only for not-thawed packets. */
7037 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
b256dc52
JS
7038 if (ctx.xin->resubmit_stats) {
7039 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7040 if (in_port->bfd) {
7041 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7042 }
7043 }
7044 if (ctx.xin->xcache) {
7045 struct xc_entry *entry;
7046
7047 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
901a517e
JR
7048 entry->dev.rx = netdev_ref(in_port->netdev);
7049 entry->dev.bfd = bfd_ref(in_port->bfd);
d6fc5f57
EJ
7050 }
7051 }
7052
1d361a81 7053 if (!xin->frozen_state && process_special(&ctx, in_port)) {
bef1403e
BP
7054 /* process_special() did all the processing for this packet.
7055 *
1d361a81
BP
7056 * We do not perform special processing on thawed packets, since that
7057 * was done before they were frozen and should not be redone. */
bef1403e
BP
7058 } else if (in_port && in_port->xbundle
7059 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
2d9b49dd
BP
7060 xlate_report_error(&ctx, "dropping packet received on port "
7061 "%s, which is reserved exclusively for mirroring",
7062 in_port->xbundle->name);
bef1403e 7063 } else {
1d361a81 7064 /* Sampling is done on initial reception; don't redo after thawing. */
a6092018 7065 unsigned int user_cookie_offset = 0;
1d361a81 7066 if (!xin->frozen_state) {
a6092018
BP
7067 user_cookie_offset = compose_sflow_action(&ctx);
7068 compose_ipfix_action(&ctx, ODPP_NONE);
e672ff9b 7069 }
0731abc5 7070 size_t sample_actions_len = ctx.odp_actions->size;
9583bc14 7071
234c3da9
BP
7072 if (tnl_process_ecn(flow)
7073 && (!in_port || may_receive(in_port, &ctx))) {
1806291d
BP
7074 const struct ofpact *ofpacts;
7075 size_t ofpacts_len;
7076
7077 if (xin->ofpacts) {
7078 ofpacts = xin->ofpacts;
7079 ofpacts_len = xin->ofpacts_len;
7080 } else if (ctx.rule) {
7081 const struct rule_actions *actions
07a3cd5c 7082 = rule_get_actions(&ctx.rule->up);
1806291d
BP
7083 ofpacts = actions->ofpacts;
7084 ofpacts_len = actions->ofpacts_len;
07a3cd5c 7085 ctx.rule_cookie = ctx.rule->up.flow_cookie;
1806291d
BP
7086 } else {
7087 OVS_NOT_REACHED();
7088 }
7089
7efbc3b7 7090 mirror_ingress_packet(&ctx);
feee58b9 7091 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true);
fff1b9c0
JR
7092 if (ctx.error) {
7093 goto exit;
7094 }
9583bc14
EJ
7095
7096 /* We've let OFPP_NORMAL and the learning action look at the
1d361a81 7097 * packet, so cancel all actions and freezing if forwarding is
8a5fb3b4 7098 * disabled. */
9efd308e
DV
7099 if (in_port && (!xport_stp_forward_state(in_port) ||
7100 !xport_rstp_forward_state(in_port))) {
1520ef4f 7101 ctx.odp_actions->size = sample_actions_len;
1d361a81 7102 ctx_cancel_freeze(&ctx);
8a5fb3b4
BP
7103 ofpbuf_clear(&ctx.action_set);
7104 }
7105
1d361a81 7106 if (!ctx.freezing) {
8a5fb3b4 7107 xlate_action_set(&ctx);
e672ff9b 7108 }
1d361a81 7109 if (ctx.freezing) {
77ab5fd2 7110 finish_freezing(&ctx);
9583bc14
EJ
7111 }
7112 }
7113
e672ff9b 7114 /* Output only fully processed packets. */
1d361a81 7115 if (!ctx.freezing
e672ff9b 7116 && xbridge->has_in_band
ce4a6b76
BP
7117 && in_band_must_output_to_local_port(flow)
7118 && !actions_output_to_local_port(&ctx)) {
11938578 7119 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
9583bc14 7120 }
aaa0fbae 7121
a6092018
BP
7122 if (user_cookie_offset) {
7123 fix_sflow_action(&ctx, user_cookie_offset);
e672ff9b 7124 }
9583bc14
EJ
7125 }
7126
1520ef4f 7127 if (nl_attr_oversized(ctx.odp_actions->size)) {
542024c4 7128 /* These datapath actions are too big for a Netlink attribute, so we
0f032e95
BP
7129 * can't hand them to the kernel directly. dpif_execute() can execute
7130 * them one by one with help, so just mark the result as SLOW_ACTION to
7131 * prevent the flow from being installed. */
7132 COVERAGE_INC(xlate_actions_oversize);
7133 ctx.xout->slow |= SLOW_ACTION;
1520ef4f 7134 } else if (too_many_output_actions(ctx.odp_actions)) {
7d031d7e
BP
7135 COVERAGE_INC(xlate_actions_too_many_output);
7136 ctx.xout->slow |= SLOW_ACTION;
542024c4
BP
7137 }
7138
64fb5f82
JP
7139 /* Update NetFlow for non-frozen traffic. */
7140 if (xbridge->netflow && !xin->frozen_state) {
1806291d
BP
7141 if (ctx.xin->resubmit_stats) {
7142 netflow_flow_update(xbridge->netflow, flow,
2031ef97 7143 ctx.nf_output_iface,
1806291d
BP
7144 ctx.xin->resubmit_stats);
7145 }
7146 if (ctx.xin->xcache) {
7147 struct xc_entry *entry;
b256dc52 7148
1806291d 7149 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
901a517e
JR
7150 entry->nf.netflow = netflow_ref(xbridge->netflow);
7151 entry->nf.flow = xmemdup(flow, sizeof *flow);
7152 entry->nf.iface = ctx.nf_output_iface;
d6fc5f57
EJ
7153 }
7154 }
7155
8d8ab6c2
JG
7156 /* Translate tunnel metadata masks to udpif format if necessary. */
7157 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7158 if (ctx.wc->masks.tunnel.metadata.present.map) {
7159 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7160 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7161 sizeof(struct geneve_opt)];
7162
7163 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7164 &ctx.wc->masks.tunnel,
7165 upcall_tnl->metadata.opts.gnv,
7166 upcall_tnl->metadata.present.len,
7167 opts);
7168 memset(&ctx.wc->masks.tunnel.metadata, 0,
7169 sizeof ctx.wc->masks.tunnel.metadata);
7170 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7171 upcall_tnl->metadata.present.len);
7172 }
7173 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7174 ctx.wc->masks.tunnel.metadata.tab = NULL;
7175 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7176 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7177 /* If we didn't have options in UDPIF format and didn't have an existing
7178 * metadata table, then it means that there were no options at all when
7179 * we started processing and any wildcards we picked up were from
7180 * action generation. Without options on the incoming packet, wildcards
7181 * aren't meaningful. To avoid them possibly getting misinterpreted,
7182 * just clear everything. */
7183 if (ctx.wc->masks.tunnel.metadata.present.map) {
7184 memset(&ctx.wc->masks.tunnel.metadata, 0,
7185 sizeof ctx.wc->masks.tunnel.metadata);
7186 } else {
7187 ctx.wc->masks.tunnel.metadata.tab = NULL;
7188 }
7189 }
7190
c0e638aa 7191 xlate_wc_finish(&ctx);
1520ef4f
BP
7192
7193exit:
8d8ab6c2
JG
7194 /* Reset the table to what it was when we came in. If we only fetched
7195 * it locally, then it has no meaning outside of flow translation. */
7196 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7197
1520ef4f
BP
7198 ofpbuf_uninit(&ctx.stack);
7199 ofpbuf_uninit(&ctx.action_set);
1d361a81 7200 ofpbuf_uninit(&ctx.frozen_actions);
1520ef4f 7201 ofpbuf_uninit(&scratch_actions);
1fc11c59 7202 ofpbuf_delete(ctx.encap_data);
fff1b9c0
JR
7203
7204 /* Make sure we return a "drop flow" in case of an error. */
7205 if (ctx.error) {
7206 xout->slow = 0;
7207 if (xin->odp_actions) {
7208 ofpbuf_clear(xin->odp_actions);
7209 }
7210 }
7211 return ctx.error;
91d6cd12
AW
7212}
7213
77ab5fd2
BP
7214enum ofperr
7215xlate_resume(struct ofproto_dpif *ofproto,
7216 const struct ofputil_packet_in_private *pin,
7217 struct ofpbuf *odp_actions,
7218 enum slow_path_reason *slow)
7219{
7220 struct dp_packet packet;
4d617a87
BP
7221 dp_packet_use_const(&packet, pin->base.packet,
7222 pin->base.packet_len);
77ab5fd2
BP
7223
7224 struct flow flow;
7225 flow_extract(&packet, &flow);
7226
7227 struct xlate_in xin;
1f4a8933
JR
7228 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7229 &flow, 0, NULL, ntohs(flow.tcp_flags),
77ab5fd2
BP
7230 &packet, NULL, odp_actions);
7231
7232 struct ofpact_note noop;
7233 ofpact_init_NOTE(&noop);
7234 noop.length = 0;
7235
7236 bool any_actions = pin->actions_len > 0;
7237 struct frozen_state state = {
7238 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7239 .ofproto_uuid = pin->bridge,
7240 .stack = pin->stack,
84cf3c1f 7241 .stack_size = pin->stack_size,
77ab5fd2
BP
7242 .mirrors = pin->mirrors,
7243 .conntracked = pin->conntracked,
43e73536 7244 .xport_uuid = UUID_ZERO,
77ab5fd2
BP
7245
7246 /* When there are no actions, xlate_actions() will search the flow
7247 * table. We don't want it to do that (we want it to resume), so
7248 * supply a no-op action if there aren't any.
7249 *
7250 * (We can't necessarily avoid translating actions entirely if there
7251 * aren't any actions, because there might be some finishing-up to do
7252 * at the end of the pipeline, and we don't check for those
7253 * conditions.) */
7254 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7255 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7256
7257 .action_set = pin->action_set,
7258 .action_set_len = pin->action_set_len,
7259 };
7260 frozen_metadata_from_flow(&state.metadata,
4d617a87 7261 &pin->base.flow_metadata.flow);
77ab5fd2
BP
7262 xin.frozen_state = &state;
7263
7264 struct xlate_out xout;
7265 enum xlate_error error = xlate_actions(&xin, &xout);
7266 *slow = xout.slow;
7267 xlate_out_uninit(&xout);
7268
7269 /* xlate_actions() can generate a number of errors, but only
7270 * XLATE_BRIDGE_NOT_FOUND really stands out to me as one that we should be
7271 * sure to report over OpenFlow. The others could come up in packet-outs
7272 * or regular flow translation and I don't think that it's going to be too
7273 * useful to report them to the controller. */
7274 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7275}
7276
2eb79142
JG
7277/* Sends 'packet' out 'ofport'. If 'port' is a tunnel and that tunnel type
7278 * supports a notion of an OAM flag, sets it if 'oam' is true.
91d6cd12
AW
7279 * May modify 'packet'.
7280 * Returns 0 if successful, otherwise a positive errno value. */
7281int
2eb79142
JG
7282xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7283 struct dp_packet *packet)
91d6cd12 7284{
84f0f298 7285 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
91d6cd12 7286 struct xport *xport;
2eb79142
JG
7287 uint64_t ofpacts_stub[1024 / 8];
7288 struct ofpbuf ofpacts;
91d6cd12 7289 struct flow flow;
91d6cd12 7290
2eb79142 7291 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
91d6cd12 7292 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
cf62fa4c 7293 flow_extract(packet, &flow);
b5e7e61a 7294 flow.in_port.ofp_port = OFPP_NONE;
91d6cd12 7295
84f0f298 7296 xport = xport_lookup(xcfg, ofport);
91d6cd12 7297 if (!xport) {
02ea2703 7298 return EINVAL;
91d6cd12 7299 }
2eb79142
JG
7300
7301 if (oam) {
71f21279
BP
7302 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7303 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7304 &flag, &flag);
2eb79142
JG
7305 }
7306
7307 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
e491a67a 7308
1f4a8933
JR
7309 /* Actions here are not referring to anything versionable (flow tables or
7310 * groups) so we don't need to worry about the version here. */
7311 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7312 OVS_VERSION_MAX, &flow, NULL,
2eb79142 7313 ofpacts.data, ofpacts.size, packet);
9583bc14 7314}
b256dc52 7315
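/* Sketch (stand-in types and names, not the real ofpbuf API) of the
 * stub-buffer pattern used by xlate_send_packet() above: compose into a
 * caller-provided stack array and touch the heap only if the contents
 * outgrow it, which small action lists never do. */
#include <stdlib.h>
#include <string.h>

struct stub_buf { void *data; size_t size, allocated; int on_heap; };

static void
stub_buf_init(struct stub_buf *b, void *stub, size_t stub_size)
{
    b->data = stub;
    b->size = 0;
    b->allocated = stub_size;
    b->on_heap = 0;
}

static void *
stub_buf_put(struct stub_buf *b, const void *src, size_t n)
{
    if (b->size + n > b->allocated) {
        size_t new_alloc = (b->size + n) * 2;
        void *p = realloc(b->on_heap ? b->data : NULL, new_alloc);
        if (!p) {
            return NULL;                      /* Error handling elided. */
        }
        if (!b->on_heap) {
            memcpy(p, b->data, b->size);      /* First spill: copy stub. */
        }
        b->data = p;
        b->allocated = new_alloc;
        b->on_heap = 1;
    }
    void *dst = (char *) b->data + b->size;
    memcpy(dst, src, n);
    b->size += n;
    return dst;
}
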
901a517e 7316void
064799a1
JR
7317xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7318 ofp_port_t in_port, struct eth_addr dl_src,
7319 int vlan, bool is_grat_arp)
b256dc52 7320{
84f0f298 7321 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
b256dc52
JS
7322 struct xbridge *xbridge;
7323 struct xbundle *xbundle;
b256dc52 7324
84f0f298 7325 xbridge = xbridge_lookup(xcfg, ofproto);
b256dc52
JS
7326 if (!xbridge) {
7327 return;
7328 }
7329
2d9b49dd 7330 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
b256dc52
JS
7331 if (!xbundle) {
7332 return;
7333 }
7334
2d9b49dd 7335 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
b256dc52 7336}
bef503e8 7337
88186383
AZ
7338void
7339xlate_set_support(const struct ofproto_dpif *ofproto,
7340 const struct dpif_backer_support *support)
7341{
7342 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7343 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7344
7345 if (xbridge) {
7346 xbridge->support = *support;
7347 }
7348}