/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-ed-props.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation.
 *
 * The goal of limiting the depth of resubmits is to ensure that flow
 * translation eventually terminates.  Only resubmits to the same table or an
 * earlier table count against the maximum depth.  This is because resubmits to
 * strictly monotonically increasing table IDs will eventually terminate, since
 * any OpenFlow switch has a finite number of tables.  OpenFlow tables are most
 * commonly traversed in numerically increasing order, so this limit has little
 * effect on conventionally designed OpenFlow pipelines.
 *
 * Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)

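/* Illustrative sketch, not part of the original file: roughly how the two
 * limits above are enforced, assuming a context carrying the 'depth' and
 * 'resubmits' counters declared below in struct xlate_ctx.  (The real checks
 * live in the resubmit path further down in this file.) */
#if 0
static bool
example_resubmit_within_limits(const struct xlate_ctx *ctx)
{
    if (ctx->depth >= MAX_DEPTH) {
        return false;           /* Resubmits nested too deeply. */
    } else if (ctx->resubmits >= MAX_RESUBMITS) {
        return false;           /* Too much total translation work. */
    }
    return true;
}
#endif
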
struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    uint16_t qinq_ethtype;         /* Ethertype of dot1q-tunnel interface,
                                    * either 0x8100 or 0x88a8. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    unsigned long *cvlans;         /* Bitmap of allowed customer VLANs,
                                    * NULL if all VLANs are allowed. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
    bool protected;                /* Protected port mode. */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */
    enum netdev_pt_mode pt_mode;     /* packet_type handling. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    struct in6_addr orig_tunnel_ipv6_dst;

    /* Stack for the push and pop actions.  See comment above nx_stack_push()
     * in nx-match.c for info on how the stack is stored. */
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Flow translation populates this with wildcards relevant in translation.
     * When 'xin->wc' is nonnull, this is the same pointer.  When 'xin->wc' is
     * null, this is a pointer to a temporary buffer. */
    struct flow_wildcards *wc;

    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
     * this is the same pointer.  When 'xin->odp_actions' is null, this points
     * to a scratch ofpbuf.  This allows code to add actions to
     * 'ctx->odp_actions' without worrying about whether the caller really
     * wants actions. */
    struct ofpbuf *odp_actions;

    /* Statistics maintained by xlate_table_action().
     *
     * These statistics limit the amount of work that a single flow
     * translation can perform.  The goal of the first of these, 'depth', is
     * primarily to prevent translation from performing an infinite amount of
     * work.  It counts the current depth of nested "resubmit"s (and a few
     * other activities); when a resubmit returns, it decreases.  Resubmits to
     * tables in strictly monotonically increasing order don't contribute to
     * 'depth' because they cannot cause a flow translation to take an infinite
     * amount of time (because the number of tables is finite).  Translation
     * aborts when 'depth' exceeds MAX_DEPTH.
     *
     * 'resubmits', on the other hand, prevents flow translation from
     * performing an extraordinarily large, though still finite, amount of
     * work.  It counts the total number of resubmits (and a few other
     * activities) that have been executed.  Returning from a resubmit does not
     * affect this counter.  Thus, this limits the amount of work that a
     * particular translation can perform.  Translation aborts when 'resubmits'
     * exceeds MAX_RESUBMITS (which is much larger than MAX_DEPTH).
     */
    int depth;                  /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */
    bool in_packet_out;         /* Currently translating a packet_out msg, if
                                 * true. */
    bool pending_encap;         /* True when waiting to commit a pending
                                 * encap action. */
    struct ofpbuf *encap_data;  /* May contain a pointer to an ofpbuf with
                                 * context for the datapath encap action. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
    bool exit;                  /* No further actions should be processed. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
    int mirror_snaplen;         /* Max size of a mirror packet in bytes. */

    /* Freezing Translation
     * ====================
     *
     * At some point during translation, the code may recognize the need to
     * halt and checkpoint the translation in a way that it can be restarted
     * again later.  We call the checkpointing process "freezing" and the
     * restarting process "thawing".
     *
     * The use cases for freezing are:
     *
     *     - "Recirculation", where the translation process discovers that it
     *       doesn't have enough information to complete translation without
     *       actually executing the actions that have already been translated,
     *       which provides the additionally needed information.  In these
     *       situations, translation freezes and assigns the frozen data a
     *       unique "recirculation ID", which it associates with the data in a
     *       table in userspace (see ofproto-dpif-rid.h).  It also adds an
     *       OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
     *       actions.  When a packet hits that action, the datapath looks its
     *       flow up again using the ID.  If there's a miss, it comes back to
     *       userspace, which finds the recirculation table entry for the ID,
     *       thaws the associated frozen data, and continues translation from
     *       that point given the additional information that is now known.
     *
     *       The archetypal example is MPLS.  As MPLS is implemented in
     *       OpenFlow, the protocol that follows the last MPLS label becomes
     *       known only when that label is popped by an OpenFlow action.  That
     *       means that Open vSwitch can't extract the headers beyond the MPLS
     *       labels until the pop action is executed.  Thus, at that point
     *       translation uses the recirculation process to extract the headers
     *       beyond the MPLS labels.
     *
     *       (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
     *       output to bonds.  OVS pre-populates all the datapath flows for
     *       bond output in the datapath, though, which means that the
     *       elaborate process of coming back to userspace for a second round
     *       of translation isn't needed, and so bonds don't follow the above
     *       process.)
     *
     *     - "Continuation".  A continuation is a way for an OpenFlow
     *       controller to interpose on a packet's traversal of the OpenFlow
     *       tables.  When the translation process encounters a "controller"
     *       action with the "pause" flag, it freezes translation, serializes
     *       the frozen data, and sends it to an OpenFlow controller.  The
     *       controller then examines and possibly modifies the frozen data
     *       and eventually sends it back to the switch, which thaws it and
     *       continues translation.
     *
     * The main problem of freezing translation is preserving state, so that
     * when the translation is thawed later it resumes from where it left off,
     * without disruption.  In particular, actions must be preserved as
     * follows:
     *
     *     - If we're freezing because an action needed more information, the
     *       action that prompted it.
     *
     *     - Any actions remaining to be translated within the current flow.
     *
     *     - If translation was frozen within a NXAST_RESUBMIT, then any
     *       actions following the resubmit action.  Resubmit actions can be
     *       nested, so this has to go all the way up the control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
     *
     *     - The table ID and cookie of the flow being translated at each
     *       level of the control stack, because these can become visible
     *       through OFPAT_CONTROLLER actions (and other ways).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need to freeze translation is identified, the
     * translation process:
     *
     * 1. Sets 'freezing' to true.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
     *    frozen_actions.header to the action to make it easy to find it
     *    later.  This action holds the current table ID and cookie so that
     *    they can be restored during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions
     *    following it within the same flow to 'frozen_actions', so that they
     *    can be executed during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these
     *    notices that we're exiting and freezing and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
     *    followed by any actions that were yet unprocessed.
     *
     * If we're freezing because of recirculation, the caller generates a
     * recirculation ID and associates all the state produced by this process
     * with it.  For post-recirculation upcall translation, the caller passes
     * it back in for the new translation to execute.  The process yielded a
     * set of ofpacts that can be translated directly, so it is not much of a
     * special case at that point.
     */
    bool freezing;
    bool recirc_update_dp_hash;    /* Generated recirculation will be preceded
                                    * by datapath HASH action to get an updated
                                    * dp_hash after recirculation. */
    uint32_t dp_hash_alg;
    uint32_t dp_hash_basis;
    struct ofpbuf frozen_actions;
    const struct ofpact_controller *pause;
    struct flow *paused_flow;

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* True if conntrack has been performed on this packet during processing
     * on the current bridge.  This is used to determine whether conntrack
     * state from the datapath should be honored after thawing. */
    bool conntracked;

    /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
    struct ofpact_nat *ct_nat_action;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */

    enum xlate_error error;     /* Translation failed. */
};

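/* Illustrative sketch, not part of the original file: the first steps of the
 * freeze sequence described in the "Freezing Translation" comment above, as a
 * hypothetical handler might perform them. */
#if 0
static void
example_freeze_for_recirc(struct xlate_ctx *ctx,
                          const struct ofpact *next, const struct ofpact *end)
{
    ctx->freezing = true;   /* Step 1: mark the translation as freezing. */
    ctx->exit = true;       /* Step 2: unwind out of nested resubmits. */

    /* Step 3 would first add an OFPACT_UNROLL_XLATE marker (current table ID
     * and cookie) to 'frozen_actions'; step 4 then saves the untranslated
     * remainder of the current flow's actions: */
    ofpbuf_put(&ctx->frozen_actions, next, (char *) end - (char *) next);
}
#endif
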
/* Structure to track VLAN manipulation */
struct xvlan_single {
    uint16_t tpid;
    uint16_t vid;
    uint16_t pcp;
};

struct xvlan {
    struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
};

const char *xlate_strerror(enum xlate_error error)
{
    switch (error) {
    case XLATE_OK:
        return "OK";
    case XLATE_BRIDGE_NOT_FOUND:
        return "Bridge not found";
    case XLATE_RECURSION_TOO_DEEP:
        return "Recursion too deep";
    case XLATE_TOO_MANY_RESUBMITS:
        return "Too many resubmits";
    case XLATE_STACK_TOO_DEEP:
        return "Stack too deep";
    case XLATE_NO_RECIRCULATION_CONTEXT:
        return "No recirculation context";
    case XLATE_RECIRCULATION_CONFLICT:
        return "Recirculation conflict";
    case XLATE_TOO_MANY_MPLS_LABELS:
        return "Too many MPLS labels";
    case XLATE_INVALID_TUNNEL_METADATA:
        return "Invalid tunnel metadata";
    }
    return "Unknown error";
}

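/* Illustrative usage sketch, not part of the original file: turning a
 * translation error into a log message, assuming 'error' holds an enum
 * xlate_error returned by xlate_actions(). */
#if 0
if (error != XLATE_OK) {
    VLOG_WARN("flow translation failed (%s)", xlate_strerror(error));
}
#endif
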
static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);

static void
apply_nested_clone_actions(struct xlate_ctx *ctx, const struct xport *in_dev,
                           struct xport *out_dev);

static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->freezing = true;
}

static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
                                  uint32_t basis)
{
    ctx->exit = true;
    ctx->freezing = true;
    ctx->recirc_update_dp_hash = true;
    ctx->dp_hash_alg = type;
    ctx->dp_hash_basis = basis;
}

static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
    return !ctx->frozen_actions.size;
}

static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
    if (ctx->freezing) {
        ctx->freezing = false;
        ctx->recirc_update_dp_hash = false;
        ofpbuf_clear(&ctx->frozen_actions);
        ctx->frozen_actions.header = NULL;
    }
}

static void finish_freezing(struct xlate_ctx *ctx);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does it
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;

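/* Illustrative sketch, not part of the original file: how a reader (e.g., a
 * handler or revalidator thread) obtains the current configuration under the
 * RCU scheme described above.  The returned pointers remain valid until the
 * thread quiesces, with no locking against the main thread's edits. */
#if 0
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
struct xport *xport = xport_lookup(xcfg, ofport);
#endif
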
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss, bool with_ct_orig);
static bool input_vid_is_valid(const struct xlate_ctx *,
                               uint16_t vid, struct xbundle *);
static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
static void xvlan_pop(struct xvlan *src);
static void xvlan_push_uninit(struct xvlan *src);
static void xvlan_extract(const struct flow *, struct xvlan *);
static void xvlan_put(struct flow *, const struct xvlan *);
static void xvlan_input_translate(const struct xbundle *,
                                  const struct xvlan *in,
                                  struct xvlan *xvlan);
static void xvlan_output_translate(const struct xbundle *,
                                   const struct xvlan *xvlan,
                                   struct xvlan *out);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          const struct xvlan *);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t hash_alg;    /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
                                              const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode,
                              uint16_t qinq_ethtype, int vlan,
                              unsigned long *trunks, unsigned long *cvlans,
                              bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

/* Tracing helpers. */

/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
 * the list of nodes maintained in ctx->xin.  The new node has type 'type' and
 * its text is created from 'format' by treating it as a printf format string.
 * Returns the list of nodes embedded within the new trace node; ordinarily,
 * the caller can ignore this, but it is useful if the caller needs to nest
 * more trace nodes within the new node.
 *
 * If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
             const char *format, ...)
{
    struct ovs_list *subtrace = NULL;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        va_list args;
        va_start(args, format);
        char *text = xvasprintf(format, args);
        subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
        va_end(args);
        free(text);
    }
    return subtrace;
}

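/* Illustrative usage sketch, not part of the original file: nesting further
 * output under the node returned by xlate_report(), using the same
 * save/restore pattern as xlate_report_table() below.  'bucket_id' is a
 * hypothetical variable; OFT_BUCKET is one of the oftrace node types from
 * ofproto-dpif-trace.h. */
#if 0
struct ovs_list *old_trace = ctx->xin->trace;
ctx->xin->trace = xlate_report(ctx, OFT_BUCKET, "bucket %"PRIu32, bucket_id);
/* ...nodes reported here become children of the "bucket" node... */
ctx->xin->trace = old_trace;
#endif
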
/* This is like xlate_report() for errors that are serious enough that we
 * should log them even if we are not tracing. */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
    } else {
        ds_put_cstr(&s, " while processing ");
        flow_format(&s, &ctx->base_flow, NULL);
        ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
        VLOG_WARN("%s", ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* This is like xlate_report() for messages that should be logged at debug
 * level (even if we are not tracing) because they can be valuable for
 * debugging. */
static void OVS_PRINTF_FORMAT(3, 4)
xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                   const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
    } else {
        VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
 * trace, whose text is 'title' followed by a formatted version of the
 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                     const char *title,
                     const struct ofpact *ofpacts, size_t ofpacts_len)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        ds_put_format(&s, "%s: ", title);
        ofpacts_format(ofpacts, ofpacts_len, NULL, &s);
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
        ds_destroy(&s);
    }
}

/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
 * trace, whose message is a formatted version of the OpenFlow action set.
 * 'verb' should be "was" or "is", depending on whether the action set reported
 * is the new action set or the old one.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ofpbuf action_list;
        ofpbuf_init(&action_list, 0);
        ofpacts_execute_action_set(&action_list, &ctx->action_set);
        if (action_list.size) {
            struct ds s = DS_EMPTY_INITIALIZER;
            ofpacts_format(action_list.data, action_list.size, NULL, &s);
            xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
                         verb, ds_cstr(&s));
            ds_destroy(&s);
        } else {
            xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
        }
        ofpbuf_uninit(&action_list);
    }
}


/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
 * OpenFlow table 'table_id') to the trace and makes this node the parent for
 * future trace nodes.  The caller should save ctx->xin->trace before calling
 * this function, then after tracing all of the activities under the table,
 * restore its previous value.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
                   uint8_t table_id)
{
    if (OVS_LIKELY(!ctx->xin->trace)) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    ds_put_format(&s, "%2d. ", table_id);
    if (rule == ctx->xin->ofproto->miss_rule) {
        ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
    } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
        ds_put_cstr(&s, "No match.");
    } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
        ds_put_cstr(&s, "Packets are IP fragments and "
                    "the fragment handling mode is \"drop\".");
    } else {
        minimatch_format(&rule->up.cr.match,
                         ofproto_get_tun_tab(&ctx->xin->ofproto->up),
                         NULL, &s, OFP_DEFAULT_PRIORITY);
        if (ds_last(&s) != ' ') {
            ds_put_cstr(&s, ", ");
        }
        ds_put_format(&s, "priority %d", rule->up.cr.priority);
        if (rule->up.flow_cookie) {
            ds_put_format(&s, ", cookie %#"PRIx64,
                          ntohll(rule->up.flow_cookie));
        }
    }
    ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
                                      ds_cstr(&s))->subs;
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
 * reporting the value of subfield 'sf'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_subfield(const struct xlate_ctx *ctx,
                      const struct mf_subfield *sf)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        mf_format_subfield(sf, &s);
        ds_put_cstr(&s, " is now ");

        if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
            union mf_value value;
            mf_get_value(sf->field, &ctx->xin->flow, &value);
            mf_format(sf->field, &value, NULL, NULL, &s);
        } else {
            union mf_subvalue cst;
            mf_read_subfield(sf, &ctx->xin->flow, &cst);
            ds_put_hex(&s, &cst, sizeof cst);
        }

        xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));

        ds_destroy(&s);
    }
}

static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    ovs_list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    ovs_list_init(&xbundle->xports);
    ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->support = *support;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
                  int vlan, unsigned long *trunks, unsigned long *cvlans,
                  bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable, bool protected)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->qinq_ethtype = qinq_ethtype;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->cvlans = cvlans;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;
    xbundle->protected = protected;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
                const struct rstp_port* rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->pt_mode = netdev_get_pt_mode(netdev);
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu, xbridge->has_in_band,
                      &xbridge->support);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
                      xbundle->vlan, xbundle->trunks, xbundle->cvlans,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable, xbundle->protected);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);
    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}

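/* Illustrative sketch, not part of the original file: a concrete instance of
 * the sample workflow above, as the main thread might run it when a bridge is
 * removed. */
#if 0
xlate_txn_start();              /* Copy xcfgp into new_xcfg. */
xlate_remove_ofproto(ofproto);  /* Edit the copy. */
xlate_txn_commit();             /* Publish new_xcfg and free the old copy. */
#endif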

static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, support);
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    netflow_unref(xbridge->netflow);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode,
                 uint16_t qinq_ethtype, int vlan,
                 unsigned long *trunks, unsigned long *cvlans,
                 bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable, bool protected)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
                      use_priority_tags, bond, lacp, floodable, protected);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    ovs_list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 const struct lldp *lldp, struct ofport_dpif *peer,
                 int stp_port_no, const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
                    stp_port_no, rstp_port, config, state, is_tunnel,
                    may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    lldp_unref(xport->lldp);
    free(xport);
}

void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}

static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
                      ofp_port_t *ofp_in_port, const struct xport **xportp)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    const struct xport *xport;

    xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));
    if (OVS_UNLIKELY(!xport)) {
        return NULL;
    }
    *xportp = xport;
    if (ofp_in_port) {
        *ofp_in_port = xport->ofp_port;
    }
    return xport->xbridge->ofproto;
}

/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
                     ofp_port_t *ofp_in_port)
{
    const struct xport *xport;

    return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
}

/* Given a datapath and flow metadata ('backer', and 'flow' respectively),
 * optionally populates 'ofproto' with the ofproto_dpif, 'ofp_in_port' with the
 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 * handles for those protocols if they're enabled.  The caller may use the
 * returned pointers until quiescing; for longer-term use, additional
 * references must be taken.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated
 * ofproto. */
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
             struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
             struct dpif_sflow **sflow, struct netflow **netflow,
             ofp_port_t *ofp_in_port)
{
    struct ofproto_dpif *ofproto;
    const struct xport *xport;

    ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);

    if (!ofproto) {
        return ENODEV;
    }

    if (ofprotop) {
        *ofprotop = ofproto;
    }

    if (ipfix) {
        *ipfix = xport ? xport->xbridge->ipfix : NULL;
    }

    if (sflow) {
        *sflow = xport ? xport->xbridge->sflow : NULL;
    }

    if (netflow) {
        *netflow = xport ? xport->xbridge->netflow : NULL;
    }

    return 0;
}

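/* Illustrative usage sketch, not part of the original file: resolving an
 * upcall's flow back to its bridge and OpenFlow input port while ignoring the
 * optional protocol handles. */
#if 0
struct ofproto_dpif *ofproto;
ofp_port_t ofp_in_port;
if (xlate_lookup(backer, flow, &ofproto, NULL, NULL, NULL,
                 &ofp_in_port) == ENODEV) {
    /* The flow's input port does not belong to any bridge; drop the packet. */
}
#endif
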
static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbridge *
xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
    struct xbridge *xbridge;

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
    struct hmap *xbundles;
    struct xbundle *xbundle;

    if (!ofbundle || !xcfg) {
        return NULL;
    }

    xbundles = &xcfg->xbundles;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
    struct hmap *xports;
    struct xport *xport;

    if (!ofport || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

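/* Note: xbridge_lookup(), xbundle_lookup(), and xport_lookup() above are
 * keyed on the ofproto-layer pointer itself (hash_pointer(<ptr>, 0)), so
 * translating an ofproto_dpif, ofbundle, or ofport_dpif into its xlate-layer
 * shadow is a constant-time bucket scan rather than a search by name or
 * number. */
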
static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
    return xport->xbridge->stp && xport->stp_port_no != -1
        ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
        : NULL;
}

static bool
xport_stp_learn_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
        ? stp_learn_in_state(stp_port_get_state(sp))
        : true;
}

static bool
xport_stp_forward_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
        ? stp_forward_in_state(stp_port_get_state(sp))
        : true;
}

static bool
xport_stp_should_forward_bpdu(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return is_stp(flow);
}

static void
stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, dp_packet_data(&payload),
                          dp_packet_size(&payload));
    }
}

static enum rstp_state
xport_get_rstp_port_state(const struct xport *xport)
{
    return xport->rstp_port
        ? rstp_port_get_state(xport->rstp_port)
        : RSTP_DISABLED;
}

static bool
xport_rstp_learn_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_forward_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_should_manage_bpdu(const struct xport *xport)
{
    return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
}

static void
rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have no RSTP. */
    if (!xport->rstp_port) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
                                dp_packet_size(&payload));
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    return xport && xport->may_enable;
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                        int depth);

static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
    struct group_dpif *group;

    group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
                              ctx->xin->tables_version, false);
    if (group) {
        return group_first_live_bucket(ctx, group, depth) != NULL;
    }

    return false;
}

#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */

static bool
bucket_is_alive(const struct xlate_ctx *ctx,
                struct ofputil_bucket *bucket, int depth)
{
    if (depth >= MAX_LIVENESS_RECURSION) {
        xlate_report_error(ctx, "bucket chaining exceeded %d links",
                           MAX_LIVENESS_RECURSION);
        return false;
    }

    return (!ofputil_bucket_has_liveness(bucket)
            || (bucket->watch_port != OFPP_ANY
                && odp_port_is_alive(ctx, bucket->watch_port))
            || (bucket->watch_group != OFPG_ANY
                && group_is_alive(ctx, bucket->watch_group, depth + 1)));
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group, int depth)
{
    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        if (bucket_is_alive(ctx, bucket, depth)) {
            return bucket;
        }
    }

    return NULL;
}

static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
                       const struct group_dpif *group,
                       uint32_t basis)
{
    struct ofputil_bucket *best_bucket = NULL;
    uint32_t best_score = 0;

    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score =
                (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
    }

    return best_bucket;
}

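/* Worked example for group_best_live_bucket() (hypothetical numbers): with
 * two live buckets of weights 1 and 2, and 16-bit hash values 0x4000 and
 * 0x3000 for a given 'basis', the scores are
 *
 *     bucket 1: 0x4000 * 1 = 0x4000
 *     bucket 2: 0x3000 * 2 = 0x6000
 *
 * so bucket 2 is chosen.  A bucket's share of flows is thus roughly
 * proportional to its weight, and dead buckets are never considered. */
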
static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
{
    switch (xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);

    case PORT_VLAN_DOT1Q_TUNNEL:
        return xvlan->v[0].vid == xbundle->vlan &&
               xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);

    default:
        OVS_NOT_REACHED();
    }
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static struct xbundle *
lookup_input_bundle__(const struct xbridge *xbridge,
                      ofp_port_t in_port, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
     * which a controller may use as the ingress port for traffic that
     * it is sourcing. */
    if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }
    return NULL;
}

static struct xbundle *
lookup_input_bundle(const struct xlate_ctx *ctx,
                    ofp_port_t in_port, struct xport **in_xportp)
{
    struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
                                                    in_port, in_xportp);
    if (!xbundle) {
        /* Odd.  A few possible reasons here:
         *
         * - We deleted a port but there are still a few packets queued up
         *   from it.
         *
         * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
         *   we don't know about.
         *
         * - The ofproto client didn't configure the port as part of a bundle.
         *   This is particularly likely to happen if a packet was received on
         *   the port after it was created, but before the client had a chance
         *   to configure its bundle.
         */
        xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
                           in_port);
    }
    return xbundle;
}

/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
 * given the packet is ingressing or egressing on 'xbundle', which has ingress
 * or egress (as appropriate) mirrors 'mirrors'. */
static void
mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
              mirror_mask_t mirrors)
{
    struct xvlan in_xvlan;
    struct xvlan xvlan;

    /* Figure out what VLAN the packet is in (because mirrors can select
     * packets on basis of VLAN). */
    xvlan_extract(&ctx->xin->flow, &in_xvlan);
    if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
        return;
    }
    xvlan_input_translate(xbundle, &in_xvlan, &xvlan);

    const struct xbridge *xbridge = ctx->xbridge;

    /* Don't mirror to destinations that we've already mirrored to. */
    mirrors &= ~ctx->mirrors;
    if (!mirrors) {
        return;
    }

    if (ctx->xin->resubmit_stats) {
        mirror_update_stats(xbridge->mbridge, mirrors,
                            ctx->xin->resubmit_stats->n_packets,
                            ctx->xin->resubmit_stats->n_bytes);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
        entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
        entry->mirror.mirrors = mirrors;
    }

    /* 'mirrors' is a bit-mask of candidates for mirroring.  Iterate as long
     * as some candidates remain. */
    while (mirrors) {
        const unsigned long *vlans;
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        int out_vlan;
        int snaplen;

        /* Get the details of the mirror represented by the rightmost 1-bit. */
        bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                     &vlans, &dup_mirrors,
                                     &out, &snaplen, &out_vlan);
        ovs_assert(has_mirror);

        /* If this mirror selects on the basis of VLAN, and it does not select
         * 'vlan', then discard this mirror and go on to the next one. */
        if (vlans) {
            ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        /* Record the mirror, and the mirrors that output to the same
         * destination, so that we don't mirror to them again.  This must be
         * done now to ensure that output_normal(), below, doesn't recursively
         * output to the same mirrors. */
        ctx->mirrors |= dup_mirrors;
        ctx->mirror_snaplen = snaplen;

        /* Send the packet to the mirror. */
        if (out) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, &xvlan);
            }
        } else if (xvlan.v[0].vid != out_vlan
                   && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
            struct xbundle *xb;
            uint16_t old_vid = xvlan.v[0].vid;

            xvlan.v[0].vid = out_vlan;
            LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xb, &xvlan)
                    && !xbundle_mirror_out(xbridge, xb)) {
                    output_normal(ctx, xb, &xvlan);
                }
            }
            xvlan.v[0].vid = old_vid;
        }

        /* output_normal() could have recursively output (to different
         * mirrors), so make sure that we don't send duplicates. */
        mirrors &= ~ctx->mirrors;
        ctx->mirror_snaplen = 0;
    }
}

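/* The loop above peels one mirror per iteration off the 'mirrors' bitmask.
 * For example (hypothetical mask), with mirrors == 0b1010, raw_ctz() selects
 * mirror 1 first; after processing, either zero_rightmost_1bit() or the
 * "mirrors &= ~ctx->mirrors" step leaves 0b1000, so mirror 3 is handled
 * next and the loop terminates once the mask is empty. */
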
static void
mirror_ingress_packet(struct xlate_ctx *ctx)
{
    if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
        struct xbundle *xbundle = lookup_input_bundle(
            ctx, ctx->xin->flow.in_port.ofp_port, NULL);
        if (xbundle) {
            mirror_packet(ctx, xbundle,
                          xbundle_mirror_src(ctx->xbridge, xbundle));
        }
    }
}

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(const struct xlate_ctx *ctx,
                   uint16_t vid, struct xbundle *in_xbundle)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
                               "packet received on port %s configured as VLAN "
                               "%d access port", vid, in_xbundle->name,
                               in_xbundle->vlan);
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_trunks_vlan(in_xbundle, vid)) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
                               "received on port %s not configured for "
                               "trunking VLAN %"PRIu16,
                               vid, in_xbundle->name, vid);
            return false;
        }
        return true;

    case PORT_VLAN_DOT1Q_TUNNEL:
        if (!xbundle_allows_cvlan(in_xbundle, vid)) {
            xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
                               "on dot1q-tunnel port %s that excludes this "
                               "VLAN", vid, in_xbundle->name);
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }
}

static void
xvlan_copy(struct xvlan *dst, const struct xvlan *src)
{
    *dst = *src;
}

static void
xvlan_pop(struct xvlan *src)
{
    memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
    memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
           sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
}

static void
xvlan_push_uninit(struct xvlan *src)
{
    memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
    memset(&src->v[0], 0, sizeof(src->v[0]));
}

/* Extracts VLAN information (headers) from 'flow'. */
static void
xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
{
    int i;
    memset(xvlan, 0, sizeof(*xvlan));
    for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
        if (!eth_type_vlan(flow->vlans[i].tpid) ||
            !(flow->vlans[i].tci & htons(VLAN_CFI))) {
            break;
        }
        xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
        xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
        xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
    }
}

/* Puts VLAN information (headers) into 'flow'. */
static void
xvlan_put(struct flow *flow, const struct xvlan *xvlan)
{
    ovs_be16 tci;
    int i;
    for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
        tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
        if (tci) {
            tci |= htons(VLAN_CFI);
            flow->vlans[i].tpid = xvlan->v[i].tpid ?
                                  htons(xvlan->v[i].tpid) :
                                  htons(ETH_TYPE_VLAN_8021Q);
        }
        flow->vlans[i].tci = tci;
    }
}

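/* Example of the mapping implemented by xvlan_extract()/xvlan_put()
 * (illustrative values): an 802.1ad double-tagged frame with an outer tag
 * (TPID 0x88a8, VID 100) and an inner tag (TPID 0x8100, VID 20) extracts to
 *
 *     xvlan.v[0] = { .tpid = 0x88a8, .vid = 100, .pcp = ... }
 *     xvlan.v[1] = { .tpid = 0x8100, .vid = 20,  .pcp = ... }
 *
 * and xvlan_put() writes the same headers back into the flow, defaulting a
 * zero tpid to ETH_TYPE_VLAN_8021Q. */
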
/* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
 * sets '*xvlan' to the VLANs of the packet during bridge internal
 * processing. */
static void
xvlan_input_translate(const struct xbundle *in_xbundle,
                      const struct xvlan *in_xvlan, struct xvlan *xvlan)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        memset(xvlan, 0, sizeof(*xvlan));
        xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
                                                 ETH_TYPE_VLAN_8021Q;
        xvlan->v[0].vid = in_xbundle->vlan;
        xvlan->v[0].pcp = in_xvlan->v[0].pcp;
        break;

    case PORT_VLAN_TRUNK:
        xvlan_copy(xvlan, in_xvlan);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        xvlan_copy(xvlan, in_xvlan);
        if (!in_xvlan->v[0].vid) {
            xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
                                                     ETH_TYPE_VLAN_8021Q;
            xvlan->v[0].vid = in_xbundle->vlan;
            xvlan->v[0].pcp = in_xvlan->v[0].pcp;
        }
        break;

    case PORT_VLAN_DOT1Q_TUNNEL:
        xvlan_copy(xvlan, in_xvlan);
        xvlan_push_uninit(xvlan);
        xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
        xvlan->v[0].vid = in_xbundle->vlan;
        xvlan->v[0].pcp = 0;
        break;

    default:
        OVS_NOT_REACHED();
    }
}

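/* For instance (illustrative), an untagged frame arriving on an access port
 * configured with VLAN 10 yields xvlan.v[0] = { ETH_TYPE_VLAN_8021Q, 10, 0 },
 * while the same frame on a dot1q-tunnel port gets the bundle's QinQ
 * ethertype and VLAN pushed as a new outermost tag. */
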
/* Given 'xvlan', the VLANs of a packet during internal processing, and
 * 'out_xbundle', a bundle on which the packet is to be output, sets
 * '*out_xvlan' to the VLANs that should be included in the output packet. */
static void
xvlan_output_translate(const struct xbundle *out_xbundle,
                       const struct xvlan *xvlan, struct xvlan *out_xvlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        memset(out_xvlan, 0, sizeof(*out_xvlan));
        break;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        xvlan_copy(out_xvlan, xvlan);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
        xvlan_copy(out_xvlan, xvlan);
        if (xvlan->v[0].vid == out_xbundle->vlan) {
            xvlan_pop(out_xvlan);
        }
        break;

    case PORT_VLAN_DOT1Q_TUNNEL:
        xvlan_copy(out_xvlan, xvlan);
        xvlan_pop(out_xvlan);
        break;

    default:
        OVS_NOT_REACHED();
    }
}

/* If the output xbundle is a dot1q-tunnel, sets the cvlan mask bits. */
static void
check_and_set_cvlan_mask(struct flow_wildcards *wc,
                         const struct xbundle *xbundle)
{
    if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
        wc->masks.vlans[1].tci = htons(0xffff);
    }
}

static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              const struct xvlan *xvlan)
{
    uint16_t vid;
    union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
    struct xport *xport;
    struct xlate_bond_recirc xr;
    bool use_recirc = false;
    struct xvlan out_xvlan;

    check_and_set_cvlan_mask(ctx->wc, out_xbundle);

    xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
    if (out_xbundle->use_priority_tags) {
        out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
                             VLAN_PCP_MASK;
    }
    vid = out_xvlan.v[0].vid;
    if (ovs_list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
        struct flow_wildcards *wc = ctx->wc;
        struct ofport_dpif *ofport;

        if (ctx->xbridge->support.odp.recirc) {
            /* If recirculation is not actually in use, 'xr.recirc_id' will be
             * set to '0', since a valid 'recirc_id' cannot be zero. */
            bond_update_post_recirc_rules(out_xbundle->bond,
                                          &xr.recirc_id,
                                          &xr.hash_basis);
            if (xr.recirc_id) {
                /* Use recirculation instead of output. */
                use_recirc = true;
                xr.hash_alg = OVS_HASH_ALG_L4;
                /* Recirculation does not require unmasking hash fields. */
                wc = NULL;
            }
        }

        ofport = bond_choose_output_slave(out_xbundle->bond,
                                          &ctx->xin->flow, wc, vid);
        xport = xport_lookup(xcfg, ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }

        /* If use_recirc is set, the main thread will handle stats
         * accounting for this bond. */
        if (!use_recirc) {
            if (ctx->xin->resubmit_stats) {
                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
                             ctx->xin->resubmit_stats->n_bytes);
            }
            if (ctx->xin->xcache) {
                struct xc_entry *entry;
                struct flow *flow;

                flow = &ctx->xin->flow;
                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
                entry->bond.bond = bond_ref(out_xbundle->bond);
                entry->bond.flow = xmemdup(flow, sizeof *flow);
                entry->bond.vid = vid;
            }
        }
    }

    memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
    xvlan_put(&ctx->xin->flow, &out_xvlan);

    compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL);
    memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
}

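/* Note on the memcpy pair above: output_normal() rewrites
 * ctx->xin->flow.vlans only for the duration of compose_output_action() and
 * then restores the saved headers.  The flow is shared state for the rest of
 * the translation, so each subsequent output must see the bridge-internal
 * VLANs rather than the previous port's rewrite. */
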
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

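/* Example (hypothetical addresses): a broadcast ARP request from 10.0.0.7
 * asking about 10.0.0.7 itself (nw_src == nw_dst) is gratuitous, as is any
 * broadcast ARP reply; an ordinary request for another host's address is
 * not. */
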
/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the bridge-internal VLAN that corresponds to the VLAN
 * headers received on 'in_port', as translated by xvlan_input_translate().
 * It must be a valid VLAN for 'in_port', as checked by
 * input_vid_is_valid(). */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, OFT_DETAIL,
                     "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, OFT_DETAIL,
                         "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac
                && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
                && (!is_gratuitous_arp(flow, ctx->wc)
                    || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, OFT_DETAIL,
                             "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}

static bool
update_learning_table__(const struct xbridge *xbridge,
                        struct xbundle *in_xbundle, struct eth_addr dl_src,
                        int vlan, bool is_grat_arp)
{
    return (in_xbundle == &ofpp_none_bundle
            || !mac_learning_update(xbridge->ml, dl_src, vlan,
                                    is_grat_arp,
                                    in_xbundle->bond != NULL,
                                    in_xbundle->ofbundle));
}

static void
update_learning_table(const struct xlate_ctx *ctx,
                      struct xbundle *in_xbundle, struct eth_addr dl_src,
                      int vlan, bool is_grat_arp)
{
    if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
                                 is_grat_arp)) {
        xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
                           "on port %s in VLAN %d",
                           ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
    }
}

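/* With tracing enabled, a successful update surfaces in the trace output as,
 * e.g. (hypothetical values):
 *
 *     learned that aa:bb:cc:dd:ee:ff is on port bond0 in VLAN 10
 *
 * Note the inverted sense of update_learning_table__(): it returns true when
 * nothing new was learned, so the debug report above fires on "false". */
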
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
                               const struct flow *flow,
                               struct mcast_snooping *ms, int vlan,
                               struct xbundle *in_xbundle,
                               const struct dp_packet *packet)
    OVS_REQ_WRLOCK(ms->rwlock)
{
    const struct igmp_header *igmp;
    int count;
    size_t offset;
    ovs_be32 ip4 = flow->igmp_group_ip4;

    offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
    igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
    if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
        xlate_report_debug(ctx, OFT_DETAIL,
                           "multicast snooping received bad IGMP "
                           "checksum on port %s in VLAN %d",
                           in_xbundle->name, vlan);
        return;
    }

    switch (ntohs(flow->tp_src)) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL,
                               "multicast snooping learned that "
                               IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
                               IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
                                                       in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
                               "from "IP_FMT" is on port %s in VLAN %d",
                               IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
        }
        break;
    case IGMPV3_HOST_MEMBERSHIP_REPORT:
        count = mcast_snooping_add_report(ms, packet, vlan,
                                          in_xbundle->ofbundle);
        if (count) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
                               "%d addresses on port %s in VLAN %d",
                               count, in_xbundle->name, vlan);
        }
        break;
    }
}

static void
update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
                               const struct flow *flow,
                               struct mcast_snooping *ms, int vlan,
                               struct xbundle *in_xbundle,
                               const struct dp_packet *packet)
    OVS_REQ_WRLOCK(ms->rwlock)
{
    const struct mld_header *mld;
    int count;
    size_t offset;

    offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
    mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);

    if (!mld ||
        packet_csum_upperlayer6(dp_packet_l3(packet),
                                mld, IPPROTO_ICMPV6,
                                dp_packet_l4_size(packet)) != 0) {
        xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
                           "bad MLD checksum on port %s in VLAN %d",
                           in_xbundle->name, vlan);
        return;
    }

    switch (ntohs(flow->tp_src)) {
    case MLD_QUERY:
        if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
            && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
                               "port %s in VLAN %d", in_xbundle->name, vlan);
        }
        break;
    case MLD_REPORT:
    case MLD_DONE:
    case MLD2_REPORT:
        count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
        if (count) {
            xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
                               "%d addresses on port %s in VLAN %d",
                               count, in_xbundle->name, vlan);
        }
        break;
    }
}

/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan'. */
static void
update_mcast_snooping_table(const struct xlate_ctx *ctx,
                            const struct flow *flow, int vlan,
                            struct xbundle *in_xbundle,
                            const struct dp_packet *packet)
{
    struct mcast_snooping *ms = ctx->xbridge->ms;
    struct xlate_cfg *xcfg;
    struct xbundle *mcast_xbundle;
    struct mcast_port_bundle *fport;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* Don't learn from flood ports. */
    mcast_xbundle = NULL;
    ovs_rwlock_wrlock(&ms->rwlock);
    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle == in_xbundle) {
            break;
        }
    }

    if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            update_mcast_snooping_table4__(ctx, flow, ms, vlan,
                                           in_xbundle, packet);
        } else {
            update_mcast_snooping_table6__(ctx, flow, ms, vlan,
                                           in_xbundle, packet);
        }
    }
    ovs_rwlock_unlock(&ms->rwlock);
}

/* Sends the packet to ports having the multicast group learned. */
static void
xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
                              struct mcast_snooping *ms OVS_UNUSED,
                              struct mcast_group *grp,
                              struct xbundle *in_xbundle,
                              const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_group_bundle *b;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, b->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast group port is unknown, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast group port is input port, dropping");
        }
    }
}

/* Sends the packet to ports connected to multicast routers. */
static void
xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
                                 struct mcast_snooping *ms,
                                 struct xbundle *in_xbundle,
                                 const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_mrouter_bundle *mrouter;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle
            && mrouter->vlan == xvlan->v[0].vid) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast router port is unknown, dropping");
        } else if (mrouter->vlan != xvlan->v[0].vid) {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast router is on another vlan, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast router port is input port, dropping");
        }
    }
}

/* Sends the packet to ports flagged to be flooded. */
static void
xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle,
                               const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *fport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast flood port is unknown, dropping");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast flood port is input port, dropping");
        }
    }
}

/* Forwards the Reports to configured ports. */
static void
xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle,
                               const struct xvlan *xvlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *rport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(rport, node, &ms->rport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, rport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, OFT_DETAIL,
                         "forwarding report to mcast flagged port");
            output_normal(ctx, mcast_xbundle, xvlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, OFT_WARN,
                         "mcast port is unknown, dropping the report");
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "mcast port is input port, dropping the report");
        }
    }
}

static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
                   struct xvlan *xvlan)
{
    struct xbundle *xbundle;

    LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
        if (xbundle != in_xbundle
            && xbundle_includes_vlan(xbundle, xvlan)
            && xbundle->floodable
            && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
            output_normal(ctx, xbundle, xvlan);
        }
    }
    ctx->nf_output_iface = NF_OUT_FLOOD;
}

static bool
is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        return ip_is_local_multicast(flow->nw_dst);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        return ipv6_is_all_hosts(&flow->ipv6_dst);
    } else {
        return false;
    }
}

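/* Examples: 224.0.0.251 (mDNS) falls in the link-local 224.0.0.0/24 block, so
 * is_ip_local_multicast() returns true for it, while 239.1.1.1 does not.
 * For IPv6, only the all-hosts address ff02::1 qualifies here. */
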
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = ctx->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    void *mac_port;
    struct xvlan in_xvlan;
    struct xvlan xvlan;
    uint16_t vlan;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (eth_type_vlan(flow->dl_type) &&
        !(flow->vlans[0].tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet with partial "
                               "VLAN tag received on port %s",
                               in_xbundle->name);
        }
        xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet received on port %s, "
                               "which is reserved exclusively for mirroring",
                               in_xbundle->name);
        }
        xlate_report(ctx, OFT_WARN,
                     "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    xvlan_extract(flow, &in_xvlan);
    if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
        xlate_report(ctx, OFT_WARN,
                     "disallowed VLAN VID for this input port, dropping");
        return;
    }
    xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
    vlan = xvlan.v[0].vid;

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    bool is_grat_arp = is_gratuitous_arp(flow, wc);
    if (ctx->xin->allow_side_effects
        && flow->packet_type == htonl(PT_ETH)
        && in_port->pt_mode != NETDEV_PT_LEGACY_L3) {
        update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
                              is_grat_arp);
    }
    if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
        struct xc_entry *entry;

        /* Save just enough info to update mac learning table later. */
        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
        entry->normal.ofproto = ctx->xbridge->ofproto;
        entry->normal.in_port = flow->in_port.ofp_port;
        entry->normal.dl_src = flow->dl_src;
        entry->normal.vlan = vlan;
        entry->normal.is_gratuitous_arp = is_grat_arp;
    }

    /* Determine output bundle. */
    if (mcast_snooping_enabled(ctx->xbridge->ms)
        && !eth_addr_is_broadcast(flow->dl_dst)
        && eth_addr_is_multicast(flow->dl_dst)
        && is_ip_any(flow)) {
        struct mcast_snooping *ms = ctx->xbridge->ms;
        struct mcast_group *grp = NULL;

        if (is_igmp(flow, wc)) {
            /* IGMP packets need to take the slow path so that they are
             * processed for mdb updates.  Otherwise, group entries could
             * expire even though hosts are still sending reports. */
            ctx->xout->slow |= SLOW_ACTION;

            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
            if (mcast_snooping_is_membership(flow->tp_src) ||
                mcast_snooping_is_query(flow->tp_src)) {
                if (ctx->xin->allow_side_effects && ctx->xin->packet) {
                    update_mcast_snooping_table(ctx, flow, vlan,
                                                in_xbundle, ctx->xin->packet);
                }
            }

            if (mcast_snooping_is_membership(flow->tp_src)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                /* RFC4541: section 2.1.1, item 1: A snooping switch should
                 * forward IGMP Membership Reports only to those ports where
                 * multicast routers are attached.  Alternatively stated: a
                 * snooping switch should not forward IGMP Membership Reports
                 * to ports on which only hosts are attached.
                 * An administrative control may be provided to override this
                 * restriction, allowing the report messages to be flooded to
                 * other ports. */
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            }
            return;
        } else if (is_mld(flow, wc)) {
            ctx->xout->slow |= SLOW_ACTION;
            if (ctx->xin->allow_side_effects && ctx->xin->packet) {
                update_mcast_snooping_table(ctx, flow, vlan,
                                            in_xbundle, ctx->xin->packet);
            }
            if (is_mld_report(flow, wc)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            }
        } else {
            if (is_ip_local_multicast(flow, wc)) {
                /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
                 * address in the 224.0.0.x range which are not IGMP must
                 * be forwarded on all ports. */
                xlate_report(ctx, OFT_DETAIL,
                             "RFC4541: section 2.1.2, item 2, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
                return;
            }
        }

        /* Forward to the ports on which the multicast group was learned. */
        ovs_rwlock_rdlock(&ms->rwlock);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
        }
        if (grp) {
            xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &xvlan);
            xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
            xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
        } else {
            if (mcast_snooping_flood_unreg(ms)) {
                xlate_report(ctx, OFT_DETAIL,
                             "unregistered multicast, flooding");
                xlate_normal_flood(ctx, in_xbundle, &xvlan);
            } else {
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
                xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
            }
        }
        ovs_rwlock_unlock(&ms->rwlock);
    } else {
        ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
        mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
        mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
        ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

        if (mac_port) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
            if (mac_xbundle && mac_xbundle != in_xbundle) {
                xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
                output_normal(ctx, mac_xbundle, &xvlan);
            } else if (!mac_xbundle) {
                xlate_report(ctx, OFT_WARN,
                             "learned port is unknown, dropping");
            } else {
                xlate_report(ctx, OFT_DETAIL,
                             "learned port is input port, dropping");
            }
        } else {
            xlate_report(ctx, OFT_DETAIL,
                         "no learned MAC for destination, flooding");
            xlate_normal_flood(ctx, in_xbundle, &xvlan);
        }
    }
}

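/* In summary, xlate_normal() above implements the NORMAL action as: look up
 * the input bundle; drop malformed or mirror-reserved frames; validate and
 * translate VLANs; apply bond admissibility checks; learn the source MAC;
 * then either run multicast snooping (IGMP/MLD handling plus group, mrouter,
 * and flood-port output) or forward to the port learned for the destination
 * MAC, flooding the VLAN when no MAC entry exists. */
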
/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'.  The
 * 'probability' is the number of packets out of UINT32_MAX to sample.  The
 * 'cookie' (of length 'cookie_size' bytes) is passed back in the callback for
 * each sampled packet.  'tunnel_out_port', if not ODPP_NONE, is added as the
 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute.  If 'include_actions', an
 * OVS_USERSPACE_ATTR_ACTIONS attribute is added.  For sampling at the egress
 * point of a tunnel, a sample(sampling_port=1) action translates into the
 * datapath actions set(tunnel(...)), sample(...), which is used for sampling
 * egress tunnel information. */
static size_t
compose_sample_action(struct xlate_ctx *ctx,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size,
                      const odp_port_t tunnel_out_port,
                      bool include_actions)
{
    if (probability == 0) {
        /* No need to generate sampling or the inner action. */
        return 0;
    }

    /* If the slow path meter is configured by the controller,
     * insert a meter action before the user space action. */
    struct ofproto *ofproto = &ctx->xin->ofproto->up;
    uint32_t meter_id = ofproto->slowpath_meter_id;

    /* When a meter action is not required, avoid generating a sample action
     * for a 100% sampling rate. */
    bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
    size_t sample_offset, actions_offset;
    if (is_sample) {
        sample_offset = nl_msg_start_nested(ctx->odp_actions,
                                            OVS_ACTION_ATTR_SAMPLE);
        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
                       probability);
        actions_offset = nl_msg_start_nested(ctx->odp_actions,
                                             OVS_SAMPLE_ATTR_ACTIONS);
    }

    if (meter_id != UINT32_MAX) {
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
    }

    odp_port_t odp_port = ofp_port_to_odp_port(
        ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
    uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
                                     flow_hash_5tuple(&ctx->xin->flow, 0));
    int cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
                                                 tunnel_out_port,
                                                 include_actions,
                                                 ctx->odp_actions);

    if (is_sample) {
        nl_msg_end_nested(ctx->odp_actions, actions_offset);
        nl_msg_end_nested(ctx->odp_actions, sample_offset);
    }

    return cookie_offset;
}

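/* The datapath actions emitted above look roughly like (a sketch; exact
 * userspace() contents vary):
 *
 *     sample(sample=<P>%,actions(meter(<id>),userspace(...)))
 *
 * with the sample() wrapper omitted when the rate is 100% and no slow-path
 * meter is configured, leaving a bare userspace(...) action. */
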
/* If sFlow is not enabled, returns 0 without doing anything.
 *
 * If sFlow is enabled, appends a template "sample" action to the ODP actions
 * in 'ctx'.  This action is a template because some of the information needed
 * to fill it out is not available until flow translation is complete.  In this
 * case, this function returns an offset, which is always nonzero, to pass
 * later to fix_sflow_action() to fill in the rest of the template. */
static size_t
compose_sflow_action(struct xlate_ctx *ctx)
{
    struct dpif_sflow *sflow = ctx->xbridge->sflow;
    if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
    return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
                                 &cookie, sizeof cookie.sflow, ODPP_NONE,
                                 true);
}

/* If flow IPFIX is enabled, make sure the IPFIX flow sample action
 * at the egress point of a tunnel port is just in front of the corresponding
 * output action.  If bridge IPFIX is enabled, this appends an IPFIX
 * sample action to 'ctx->odp_actions'. */
static void
compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
{
    struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
    odp_port_t tunnel_out_port = ODPP_NONE;

    if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
        return;
    }

    /* For the input case, output_odp_port is ODPP_NONE, which is an invalid
     * port number. */
    if (output_odp_port == ODPP_NONE &&
        !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
        return;
    }

    /* For the output case, output_odp_port is valid. */
    if (output_odp_port != ODPP_NONE) {
        if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
            return;
        }
        /* If tunnel sampling is enabled, put an additional option attribute:
         * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT */
        if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
            dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
            tunnel_out_port = output_odp_port;
        }
    }

    union user_action_cookie cookie = {
        .ipfix = {
            .type = USER_ACTION_COOKIE_IPFIX,
            .output_odp_port = output_odp_port,
        }
    };
    compose_sample_action(ctx,
                          dpif_ipfix_get_bridge_exporter_probability(ipfix),
                          &cookie, sizeof cookie.ipfix, tunnel_out_port,
                          false);
}

/* Fixes up the "sample" action according to data collected while composing
 * ODP actions, as described in compose_sflow_action().
 *
 * 'user_cookie_offset' must be the offset returned by
 * compose_sflow_action(). */
static void
fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = base->vlans[0].tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (ctx->sflow_n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
            ctx->xbridge->sflow, ctx->sflow_odp_port);
        if (cookie->sflow.output) {
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
        break;
    }
}

515793d5
BP
3011static bool
3012process_special(struct xlate_ctx *ctx, const struct xport *xport)
db7d4e46 3013{
515793d5 3014 const struct flow *flow = &ctx->xin->flow;
49a73e0c 3015 struct flow_wildcards *wc = ctx->wc;
46c88433 3016 const struct xbridge *xbridge = ctx->xbridge;
515793d5
BP
3017 const struct dp_packet *packet = ctx->xin->packet;
3018 enum slow_path_reason slow;
642dc74d 3019
46c88433 3020 if (!xport) {
515793d5 3021 slow = 0;
46c88433 3022 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
db7d4e46 3023 if (packet) {
46c88433 3024 cfm_process_heartbeat(xport->cfm, packet);
db7d4e46 3025 }
515793d5 3026 slow = SLOW_CFM;
fab52e16 3027 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
db7d4e46 3028 if (packet) {
46c88433 3029 bfd_process_packet(xport->bfd, flow, packet);
60d02c72
AW
3030 /* If a POLL is received, immediately send a FINAL back. */
3031 if (bfd_should_send_packet(xport->bfd)) {
6d308b28 3032 ofproto_dpif_monitor_port_send_soon(xport->ofport);
60d02c72 3033 }
db7d4e46 3034 }
515793d5 3035 slow = SLOW_BFD;
46c88433 3036 } else if (xport->xbundle && xport->xbundle->lacp
db7d4e46
JP
3037 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3038 if (packet) {
46c88433 3039 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
db7d4e46 3040 }
515793d5 3041 slow = SLOW_LACP;
9efd308e
DV
3042 } else if ((xbridge->stp || xbridge->rstp) &&
3043 stp_should_process_flow(flow, wc)) {
db7d4e46 3044 if (packet) {
f025bcb7
JR
3045 xbridge->stp
3046 ? stp_process_packet(xport, packet)
3047 : rstp_process_packet(xport, packet);
db7d4e46 3048 }
515793d5 3049 slow = SLOW_STP;
19aef6ef 3050 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
0477baa9
DF
3051 if (packet) {
3052 lldp_process_packet(xport->lldp, packet);
3053 }
515793d5 3054 slow = SLOW_LLDP;
db7d4e46 3055 } else {
515793d5
BP
3056 slow = 0;
3057 }
3058
3059 if (slow) {
3060 ctx->xout->slow |= slow;
3061 return true;
3062 } else {
3063 return false;
db7d4e46
JP
3064 }
3065}
3066
a36de779
PS
3067static int
3068tnl_route_lookup_flow(const struct flow *oflow,
a8704b50
PS
3069 struct in6_addr *ip, struct in6_addr *src,
3070 struct xport **out_port)
a36de779
PS
3071{
3072 char out_dev[IFNAMSIZ];
3073 struct xbridge *xbridge;
3074 struct xlate_cfg *xcfg;
c2b878e0
TLSC
3075 struct in6_addr gw;
3076 struct in6_addr dst;
a36de779 3077
c2b878e0 3078 dst = flow_tnl_dst(&oflow->tunnel);
ed52ca57 3079 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
a36de779
PS
3080 return -ENOENT;
3081 }
3082
c2b878e0
TLSC
3083 if (ipv6_addr_is_set(&gw) &&
3084 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
a36de779
PS
3085 *ip = gw;
3086 } else {
c2b878e0 3087 *ip = dst;
a36de779
PS
3088 }
3089
3090 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
3091 ovs_assert(xcfg);
3092
3093 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
3094 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3095 struct xport *port;
3096
3097 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3098 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3099 *out_port = port;
3100 return 0;
3101 }
3102 }
3103 }
3104 }
3105 return -ENOENT;
3106}
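
/* Worked example (illustrative only; the device name and addresses are
 * hypothetical): for a tunnel with remote_ip 172.16.1.2 and a route such as
 * "172.16.1.0/24 dev br-phy" in the OVS router, ovs_router_lookup() fills
 * out_dev with "br-phy" and reports no gateway, so '*ip' is set to the
 * tunnel destination itself and the loops above set '*out_port' to the
 * xport whose netdev is named "br-phy". */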
3107
3108static int
cdd42eda
JG
3109compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3110 struct dp_packet *packet)
a36de779 3111{
cdd42eda 3112 struct xbridge *xbridge = out_dev->xbridge;
a36de779
PS
3113 struct ofpact_output output;
3114 struct flow flow;
3115
3116 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
cf62fa4c 3117 flow_extract(packet, &flow);
cdd42eda
JG
3118 flow.in_port.ofp_port = out_dev->ofp_port;
3119 output.port = OFPP_TABLE;
a36de779
PS
3120 output.max_len = 0;
3121
1f4a8933
JR
3122 return ofproto_dpif_execute_actions__(xbridge->ofproto,
3123 ctx->xin->tables_version, &flow,
3124 NULL, &output.ofpact, sizeof output,
2d9b49dd 3125 ctx->depth, ctx->resubmits, packet);
a36de779
PS
3126}
3127
c2b878e0
TLSC
3128static void
3129tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3130 const struct eth_addr eth_src,
3131 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3132{
3133 struct dp_packet packet;
3134
3135 dp_packet_init(&packet, 0);
16187903 3136 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
c2b878e0
TLSC
3137 compose_table_xlate(ctx, out_dev, &packet);
3138 dp_packet_uninit(&packet);
3139}
3140
a36de779 3141static void
cdd42eda 3142tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
74ff3298 3143 const struct eth_addr eth_src,
a36de779
PS
3144 ovs_be32 ip_src, ovs_be32 ip_dst)
3145{
cf62fa4c 3146 struct dp_packet packet;
a36de779 3147
cf62fa4c 3148 dp_packet_init(&packet, 0);
eb0b295e
BP
3149 compose_arp(&packet, ARP_OP_REQUEST,
3150 eth_src, eth_addr_zero, true, ip_src, ip_dst);
a36de779 3151
cdd42eda 3152 compose_table_xlate(ctx, out_dev, &packet);
cf62fa4c 3153 dp_packet_uninit(&packet);
a36de779
PS
3154}
3155
7c12dfc5
SC
3156static void
3157propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3158 const struct flow *src_flow,
3159 struct eth_addr dmac, struct eth_addr smac,
3160 struct in6_addr s_ip6, ovs_be32 s_ip,
3161 bool is_tnl_ipv6, uint8_t nw_proto)
3162{
3163 dst_flow->dl_dst = dmac;
3164 dst_flow->dl_src = smac;
3165
3166 dst_flow->packet_type = htonl(PT_ETH);
3167 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3168 dst_flow->nw_src = src_flow->tunnel.ip_src;
3169 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3170 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3171
3172 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3173 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3174 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3175 dst_flow->tp_src = src_flow->tunnel.tp_src;
3176
3177 if (is_tnl_ipv6) {
3178 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3179 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3180 && !ipv6_mask_is_any(&s_ip6)) {
3181 dst_flow->ipv6_src = s_ip6;
3182 }
3183 } else {
3184 dst_flow->dl_type = htons(ETH_TYPE_IP);
3185 if (dst_flow->nw_src == 0 && s_ip) {
3186 dst_flow->nw_src = s_ip;
3187 }
3188 }
3189 dst_flow->nw_proto = nw_proto;
3190}
3191
3192/*
3193 * Populate the 'flow' and 'base_flow' L3 fields for the post-tunnel-push
3194 * translations.
3195 */
3196static void
3197propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3198 struct eth_addr smac, struct in6_addr s_ip6,
3199 ovs_be32 s_ip, bool is_tnl_ipv6,
3200 enum ovs_vport_type tnl_type)
3201{
3202 struct flow *base_flow, *flow;
3203 flow = &ctx->xin->flow;
3204 base_flow = &ctx->base_flow;
3205 uint8_t nw_proto = 0;
3206
3207 switch (tnl_type) {
3208 case OVS_VPORT_TYPE_GRE:
3209 nw_proto = IPPROTO_GRE;
3210 break;
3211 case OVS_VPORT_TYPE_VXLAN:
3212 case OVS_VPORT_TYPE_GENEVE:
3213 nw_proto = IPPROTO_UDP;
3214 break;
3215 case OVS_VPORT_TYPE_LISP:
3216 case OVS_VPORT_TYPE_STT:
3217 case OVS_VPORT_TYPE_UNSPEC:
3218 case OVS_VPORT_TYPE_NETDEV:
3219 case OVS_VPORT_TYPE_INTERNAL:
3220 case __OVS_VPORT_TYPE_MAX:
3221 default:
3222 OVS_NOT_REACHED();
3223 break;
3224 }
3225 /*
3226 * Update 'base_flow' first and then 'flow', since the destination flow
3227 * is modified in place and 'flow' is the source for both calls.
3228 */
3229 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3230 is_tnl_ipv6, nw_proto);
3231 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3232 is_tnl_ipv6, nw_proto);
3233}
3234
3235/* Validate whether the translated, combined actions are OK to proceed.
3236 * If the actions contain a TRUNC action, the tunnel_push combine is not
3237 * allowed, since the stats could not be updated correctly.
3238 */
3239static bool
3240is_tunnel_actions_clone_ready(struct xlate_ctx *ctx)
3241{
3242 struct nlattr *tnl_actions;
3243 const struct nlattr *a;
3244 unsigned int left;
3245 size_t actions_len;
3246 struct ofpbuf *actions = ctx->odp_actions;
3247
3248 if (!actions) {
3249 /* No actions, no harm in doing combine. */
3250 return true;
3251 }
3252
3253 /* Cannot perform tunnel push on slow path action CONTROLLER_OUTPUT. */
3254 if (ctx->xout->slow & SLOW_CONTROLLER) {
3255 return false;
3256 }
3257 actions_len = actions->size;
3258
3259 tnl_actions = (struct nlattr *) (actions->data);
3260 NL_ATTR_FOR_EACH_UNSAFE (a, left, tnl_actions, actions_len) {
3261 int type = nl_attr_type(a);
3262 if (type == OVS_ACTION_ATTR_TRUNC) {
3263 VLOG_DBG("Cannot do tunnel action-combine on trunc action");
3264 return false;
3266 }
3267 }
3268 return true;
3269}
3270
3271static bool
3272validate_and_combine_post_tnl_actions(struct xlate_ctx *ctx,
3273 const struct xport *xport,
3274 struct xport *out_dev,
3275 struct ovs_action_push_tnl tnl_push_data)
3276{
3277 const struct dpif_flow_stats *backup_resubmit_stats;
3278 struct xlate_cache *backup_xcache;
3279 bool nested_act_flag = false;
3280 struct flow_wildcards tmp_flow_wc;
3281 struct flow_wildcards *backup_flow_wc_ptr;
3282 bool backup_side_effects;
3283 const struct dp_packet *backup_pkt;
3284
3285 memset(&tmp_flow_wc, 0, sizeof tmp_flow_wc);
3286 backup_flow_wc_ptr = ctx->wc;
3287 ctx->wc = &tmp_flow_wc;
3288 ctx->xin->wc = NULL;
3289 backup_resubmit_stats = ctx->xin->resubmit_stats;
3290 backup_xcache = ctx->xin->xcache;
3291 backup_side_effects = ctx->xin->allow_side_effects;
3292 backup_pkt = ctx->xin->packet;
3293
3294 size_t push_action_size = 0;
3295 size_t clone_ofs = nl_msg_start_nested(ctx->odp_actions,
3296 OVS_ACTION_ATTR_CLONE);
3297 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3298 push_action_size = ctx->odp_actions->size;
3299
3300 ctx->xin->resubmit_stats = NULL;
3301 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3302 ctx->xin->allow_side_effects = false;
3303 ctx->xin->packet = NULL;
3304
3305 /* Push the cache entry for the tunnel first. */
3306 struct xc_entry *entry;
3307 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3308 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3309 entry->tunnel_hdr.operation = ADD;
3310
3311 apply_nested_clone_actions(ctx, xport, out_dev);
3312 nested_act_flag = is_tunnel_actions_clone_ready(ctx);
3313
3314 if (nested_act_flag) {
3315 /* Similar to the stats update in revalidation, the xcache entries
3316 * populated by the previous translation are used to update the
3317 * stats correctly.
3318 */
3319 if (backup_resubmit_stats) {
3320 struct dpif_flow_stats tmp_resubmit_stats;
3321 memcpy(&tmp_resubmit_stats, backup_resubmit_stats,
3322 sizeof tmp_resubmit_stats);
3323 xlate_push_stats(ctx->xin->xcache, &tmp_resubmit_stats);
3324 }
3325 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3326 } else {
3327 /* Combine is not valid. */
3328 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3329 goto out;
3330 }
3331 if (ctx->odp_actions->size > push_action_size) {
3332 /* Update the CLONE action only when combined. */
3333 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3334 } else {
3335 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3336 /* XXX: There is no real use-case for a tunnel push without
3337 * any post actions. However, keep it for now to make
3338 * 'make check' happy. This should be removed once all of the
3339 * 'make check' tunnel test cases do something meaningful with
3340 * tunnel-encapsulated packets.
3341 */
3342 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3343 }
3344
3345out:
3346 /* Restore context status. */
3347 ctx->xin->resubmit_stats = backup_resubmit_stats;
3348 xlate_cache_delete(ctx->xin->xcache);
3349 ctx->xin->xcache = backup_xcache;
3350 ctx->xin->allow_side_effects = backup_side_effects;
3351 ctx->xin->packet = backup_pkt;
3352 ctx->wc = backup_flow_wc_ptr;
3353 return nested_act_flag;
3354}
3355
a36de779 3356static int
81de18ec 3357build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
a36de779
PS
3358 const struct flow *flow, odp_port_t tunnel_odp_port)
3359{
4975aa3e 3360 struct netdev_tnl_build_header_params tnl_params;
a36de779
PS
3361 struct ovs_action_push_tnl tnl_push_data;
3362 struct xport *out_dev = NULL;
c2b878e0
TLSC
3363 ovs_be32 s_ip = 0, d_ip = 0;
3364 struct in6_addr s_ip6 = in6addr_any;
3365 struct in6_addr d_ip6 = in6addr_any;
74ff3298
JR
3366 struct eth_addr smac;
3367 struct eth_addr dmac;
a36de779 3368 int err;
c2b878e0
TLSC
3369 char buf_sip6[INET6_ADDRSTRLEN];
3370 char buf_dip6[INET6_ADDRSTRLEN];
a36de779 3371
7c12dfc5
SC
3372 /* Structures to back up the Ethernet and IP data of base_flow. */
3373 struct flow old_base_flow;
3374 struct flow old_flow;
3375
3376 /* Back up the flow & base_flow data. */
3377 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3378 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3379
a8704b50 3380 err = tnl_route_lookup_flow(flow, &d_ip6, &s_ip6, &out_dev);
a36de779 3381 if (err) {
2d9b49dd 3382 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
a36de779
PS
3383 return err;
3384 }
c2b878e0 3385
2d9b49dd 3386 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
c2b878e0
TLSC
3387 ipv6_string_mapped(buf_dip6, &d_ip6),
3388 netdev_get_name(out_dev->netdev));
a36de779
PS
3389
3390 /* Use mac addr of bridge port of the peer. */
74ff3298 3391 err = netdev_get_etheraddr(out_dev->netdev, &smac);
a36de779 3392 if (err) {
2d9b49dd
BP
3393 xlate_report(ctx, OFT_WARN,
3394 "tunnel output device lacks Ethernet address");
a36de779
PS
3395 return err;
3396 }
3397
c2b878e0
TLSC
3398 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3399 if (d_ip) {
a8704b50 3400 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
a36de779
PS
3401 }
3402
c2b878e0 3403 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
a36de779 3404 if (err) {
2d9b49dd
BP
3405 xlate_report(ctx, OFT_DETAIL,
3406 "neighbor cache miss for %s on bridge %s, "
c2b878e0
TLSC
3407 "sending %s request",
3408 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3409 if (d_ip) {
3410 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3411 } else {
3412 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3413 }
a36de779
PS
3414 return err;
3415 }
c2b878e0 3416
a36de779
PS
3417 if (ctx->xin->xcache) {
3418 struct xc_entry *entry;
3419
53902038 3420 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
901a517e
JR
3421 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3422 sizeof entry->tnl_neigh_cache.br_name);
3423 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
a36de779 3424 }
81de18ec 3425
2d9b49dd 3426 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
c2b878e0
TLSC
3427 " to "ETH_ADDR_FMT" %s",
3428 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3429 ETH_ADDR_ARGS(dmac), buf_dip6);
3430
4975aa3e
PS
3431 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3432 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
a36de779
PS
3433 if (err) {
3434 return err;
3435 }
81765c00
BP
3436 tnl_push_data.tnl_port = tunnel_odp_port;
3437 tnl_push_data.out_port = out_dev->odp_port;
beb75a40 3438
7c12dfc5
SC
3439 /* After the tunnel header has been added, the MAC and IP data of flow
3440 * and base_flow need to be set properly, since there is no recirculation
3441 * any more when sending the packet to the tunnel. */
beb75a40 3442
7c12dfc5
SC
3443 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6, s_ip,
3444 tnl_params.is_ipv6, tnl_push_data.tnl_type);
3445
3446
3447 /* Check whether it is possible to apply nested clone actions on the
3448 * tunnel; revert the combined actions on the tunnel if it is not valid.
3449 */
3450 if (!validate_and_combine_post_tnl_actions(ctx, xport, out_dev,
3451 tnl_push_data)) {
3452 /* The datapath is not doing the recirculation now, so let's make it
3453 * happen explicitly.
3454 */
3455 size_t clone_ofs = nl_msg_start_nested(ctx->odp_actions,
3456 OVS_ACTION_ATTR_CLONE);
3457 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3458 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3459 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3460 }
3461 /* Restore the flows after the translation. */
3462 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3463 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
a36de779
PS
3464 return 0;
3465}
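
/* The two datapath encodings produced above, in ovs-dpctl-style action
 * syntax (illustrative only; the exact tnl_push attributes depend on the
 * tunnel configuration):
 *
 *   combined:  clone(tnl_push(...),<actions xlated on the underlay bridge>)
 *   fallback:  clone(tnl_push(...),recirc(0))
 *
 * The fallback lets the datapath recirculate the encapsulated packet, which
 * preserves the behavior from before the combine optimization. */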
3466
704bb0bf
JS
3467static void
3468xlate_commit_actions(struct xlate_ctx *ctx)
3469{
3470 bool use_masked = ctx->xbridge->support.masked_set_action;
3471
3472 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3473 ctx->odp_actions, ctx->wc,
1fc11c59
JS
3474 use_masked, ctx->pending_encap,
3475 ctx->encap_data);
f839892a 3476 ctx->pending_encap = false;
1fc11c59
JS
3477 ofpbuf_delete(ctx->encap_data);
3478 ctx->encap_data = NULL;
704bb0bf
JS
3479}
3480
07659514 3481static void
72fe7578 3482clear_conntrack(struct xlate_ctx *ctx)
07659514 3483{
72fe7578 3484 ctx->conntracked = false;
6846e91e 3485 flow_clear_conntrack(&ctx->xin->flow);
07659514
JS
3486}
3487
58d636ee
BK
3488static bool
3489xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3490{
3491 const struct xport *xport_in;
3492
3493 if (!xport_out) {
3494 return false;
3495 }
3496
3497 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3498
3499 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3500 xport_in->xbundle->protected && xport_out->xbundle->protected);
3501}
3502
8bdb2bdb
SC
3503/* Function to combine actions from the following device/port with the
3504 * current device's actions in the OpenFlow pipeline. Mainly used for the
3505 * translation of patch/tunnel port output actions. It first pushes the
3506 * OpenFlow state onto a stack, clears it out to execute the packet
3507 * through the device, and finally pops the OpenFlow state back from the
3508 * stack. This is equivalent to cloning the packet in translation for the
3509 * duration of execution.
3510 *
3511 * On output to a patch port, the output action is replaced with a set of
3512 * nested actions on the peer patch port. Similarly, on output to a tunnel
3513 * port, the post nested actions on the tunnel are chained up with the
3514 * tunnel-push action. */
3515static void
3516apply_nested_clone_actions(struct xlate_ctx *ctx, const struct xport *in_dev,
3517 struct xport *out_dev)
3518{
3519 struct flow *flow = &ctx->xin->flow;
3520 struct flow old_flow = ctx->xin->flow;
3521 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3522 bool old_conntrack = ctx->conntracked;
3523 bool old_was_mpls = ctx->was_mpls;
3524 ovs_version_t old_version = ctx->xin->tables_version;
3525 struct ofpbuf old_stack = ctx->stack;
3526 uint8_t new_stack[1024];
3527 struct ofpbuf old_action_set = ctx->action_set;
3528 struct ovs_list *old_trace = ctx->xin->trace;
3529 uint64_t actset_stub[1024 / 8];
3530
3531 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3532 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3533 flow->in_port.ofp_port = out_dev->ofp_port;
3534 flow->metadata = htonll(0);
3535 memset(&flow->tunnel, 0, sizeof flow->tunnel);
86bfb29a 3536 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
8bdb2bdb
SC
3537 flow->tunnel.metadata.tab =
3538 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3539 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3540 memset(flow->regs, 0, sizeof flow->regs);
3541 flow->actset_output = OFPP_UNSET;
3542 clear_conntrack(ctx);
3543 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3544 out_dev->xbridge->name);
3545 mirror_mask_t old_mirrors = ctx->mirrors;
3546 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3547 if (independent_mirrors) {
3548 ctx->mirrors = 0;
3549 }
3550 ctx->xbridge = out_dev->xbridge;
3551
3552 /* The bridge is now known so obtain its table version. */
3553 ctx->xin->tables_version
3554 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3555
3556 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3557 if (xport_stp_forward_state(out_dev) &&
3558 xport_rstp_forward_state(out_dev)) {
3559 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3560 false);
3561 if (!ctx->freezing) {
3562 xlate_action_set(ctx);
3563 }
3564 if (ctx->freezing) {
3565 finish_freezing(ctx);
3566 }
3567 } else {
3568 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3569 * the learning action look at the packet, then drop it. */
3570 struct flow old_base_flow = ctx->base_flow;
3571 size_t old_size = ctx->odp_actions->size;
3572 mirror_mask_t old_mirrors2 = ctx->mirrors;
3573
3574 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3575 false);
3576 ctx->mirrors = old_mirrors2;
3577 ctx->base_flow = old_base_flow;
3578 ctx->odp_actions->size = old_size;
3579
3580 /* Undo changes that may have been done for freezing. */
3581 ctx_cancel_freeze(ctx);
3582 }
3583 }
3584
3585 ctx->xin->trace = old_trace;
3586 if (independent_mirrors) {
3587 ctx->mirrors = old_mirrors;
3588 }
3589 ctx->xin->flow = old_flow;
3590 ctx->xbridge = in_dev->xbridge;
3591 ofpbuf_uninit(&ctx->action_set);
3592 ctx->action_set = old_action_set;
3593 ofpbuf_uninit(&ctx->stack);
3594 ctx->stack = old_stack;
3595
3596 /* Restore calling bridge's lookup version. */
3597 ctx->xin->tables_version = old_version;
3598
3599 /* Restore the calling bridge's tunneling information. */
3600 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3601
3602 /* The out bridge popping MPLS should have no effect on the original
3603 * bridge. */
3604 ctx->was_mpls = old_was_mpls;
3605
3606 /* The out bridge's conntrack execution should have no effect on the
3607 * original bridge. */
3608 ctx->conntracked = old_conntrack;
3609
3610 /* The fact that the out bridge exits (for any reason) does not mean
3611 * that the original bridge should exit. Specifically, if the out
3612 * bridge freezes translation, the original bridge must continue
3613 * processing with the original, not the frozen packet! */
3614 ctx->exit = false;
3615
3616 /* Out bridge errors do not propagate back. */
3617 ctx->error = XLATE_OK;
3618
3619 if (ctx->xin->resubmit_stats) {
3620 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3621 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3622 if (out_dev->bfd) {
3623 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3624 }
3625 }
3626 if (ctx->xin->xcache) {
3627 struct xc_entry *entry;
3628
3629 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3630 entry->dev.tx = netdev_ref(in_dev->netdev);
3631 entry->dev.rx = netdev_ref(out_dev->netdev);
3632 entry->dev.bfd = bfd_ref(out_dev->bfd);
3633 }
3634}
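
/* A minimal sketch (illustrative only, not part of the original source) of
 * the push/execute/pop pattern implemented above, reduced to the flow
 * alone; the real function also saves and restores the stack, action set,
 * mirrors, tables version, conntrack state, and more. */
static void
nested_clone_pattern_example(struct xlate_ctx *ctx, struct xport *out_dev)
{
    struct flow old_flow = ctx->xin->flow;   /* Push translation state. */

    /* Re-point the flow at the peer device; translation through the peer
     * bridge (xlate_table_action() above) would run at this point. */
    ctx->xin->flow.in_port.ofp_port = out_dev->ofp_port;

    ctx->xin->flow = old_flow;               /* Pop translation state. */
}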
3635
081617f0
JS
3636static bool
3637check_output_prerequisites(struct xlate_ctx *ctx,
3638 const struct xport *xport,
3639 struct flow *flow,
3640 bool check_stp)
9583bc14 3641{
49a73e0c 3642 struct flow_wildcards *wc = ctx->wc;
9583bc14 3643
46c88433 3644 if (!xport) {
2d9b49dd 3645 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
081617f0 3646 return false;
46c88433 3647 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
2d9b49dd 3648 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
081617f0 3649 return false;
1356dbd1 3650 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
2d9b49dd
BP
3651 xlate_report(ctx, OFT_WARN,
3652 "Mirror truncate to ODPP_NONE, skipping output");
081617f0 3653 return false;
58d636ee 3654 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
2d9b49dd
BP
3655 xlate_report(ctx, OFT_WARN,
3656 "Flow is between protected ports, skipping output.");
081617f0 3657 return false;
0d1cee12 3658 } else if (check_stp) {
bbbca389 3659 if (is_stp(&ctx->base_flow)) {
9efd308e
DV
3660 if (!xport_stp_should_forward_bpdu(xport) &&
3661 !xport_rstp_should_manage_bpdu(xport)) {
3662 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3663 xlate_report(ctx, OFT_WARN,
3664 "STP not in listening state, "
3665 "skipping bpdu output");
9efd308e 3666 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3667 xlate_report(ctx, OFT_WARN,
3668 "RSTP not managing BPDU in this state, "
3669 "skipping bpdu output");
9efd308e 3670 }
081617f0 3671 return false;
0d1cee12 3672 }
67818616
MV
3673 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3674 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3675 wc))) {
3676 /* Pass; STP should not block link health detection. */
9efd308e
DV
3677 } else if (!xport_stp_forward_state(xport) ||
3678 !xport_rstp_forward_state(xport)) {
3679 if (ctx->xbridge->stp != NULL) {
2d9b49dd
BP
3680 xlate_report(ctx, OFT_WARN,
3681 "STP not in forwarding state, skipping output");
9efd308e 3682 } else if (ctx->xbridge->rstp != NULL) {
2d9b49dd
BP
3683 xlate_report(ctx, OFT_WARN,
3684 "RSTP not in forwarding state, skipping output");
9efd308e 3685 }
081617f0 3686 return false;
0d1cee12 3687 }
9583bc14 3688 }
5dbfe239
ZB
3689
3690 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3691 flow->packet_type != htonl(PT_ETH)) {
3692 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
3693 "through legacy L2 port. Dropping packet.");
3694 return false;
3695 }
3696
081617f0
JS
3697 return true;
3698}
3699
3700static bool
3701terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3702 struct flow *flow, struct flow_wildcards *wc,
3703 odp_port_t *tnl_port)
3704{
3705 *tnl_port = ODPP_NONE;
3706
3707 /* XXX: Write a better filter for the tunnel port. We can use in_port
3708 * in the tunnel-port flow to avoid these checks completely. */
3709 if (ofp_port == OFPP_LOCAL &&
3710 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3711 *tnl_port = tnl_port_map_lookup(flow, wc);
3712 }
3713
3714 return *tnl_port != ODPP_NONE;
3715}
3716
3717static void
3718compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3719 const struct xlate_bond_recirc *xr, bool check_stp)
3720{
3721 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
3722 struct flow_wildcards *wc = ctx->wc;
3723 struct flow *flow = &ctx->xin->flow;
3724 struct flow_tnl flow_tnl;
3725 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
3726 uint8_t flow_nw_tos;
3727 odp_port_t out_port, odp_port, odp_tnl_port;
3728 bool is_native_tunnel = false;
3729 uint8_t dscp;
5dbfe239
ZB
3730 struct eth_addr flow_dl_dst = flow->dl_dst;
3731 struct eth_addr flow_dl_src = flow->dl_src;
3732 ovs_be32 flow_packet_type = flow->packet_type;
3733 ovs_be16 flow_dl_type = flow->dl_type;
081617f0
JS
3734
3735 /* If 'struct flow' gets additional metadata, we'll need to zero it out
3736 * before traversing a patch port. */
3d2fbd70 3737 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 40);
081617f0
JS
3738 memset(&flow_tnl, 0, sizeof flow_tnl);
3739
3740 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
3741 return;
3742 }
9583bc14 3743
875ab130
BP
3744 if (flow->packet_type == htonl(PT_ETH)) {
3745 /* Strip Ethernet header for legacy L3 port. */
3746 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
3747 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
3748 ntohs(flow->dl_type));
3749 }
beb75a40
JS
3750 }
3751
46c88433 3752 if (xport->peer) {
8bdb2bdb
SC
3753 apply_nested_clone_actions(ctx, xport, xport->peer);
3754 return;
9583bc14
EJ
3755 }
3756
f0fb825a 3757 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
33bf9176 3758 flow_nw_tos = flow->nw_tos;
9583bc14 3759
16194afd
DDP
3760 if (count_skb_priorities(xport)) {
3761 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3762 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
3763 wc->masks.nw_tos |= IP_DSCP_MASK;
3764 flow->nw_tos &= ~IP_DSCP_MASK;
3765 flow->nw_tos |= dscp;
3766 }
9583bc14
EJ
3767 }
3768
46c88433 3769 if (xport->is_tunnel) {
c2b878e0 3770 struct in6_addr dst;
9583bc14
EJ
3771 /* Save tunnel metadata so that changes made due to
3772 * the Logical (tunnel) Port are not visible for any further
3773 * matches, while explicit set actions on tunnel metadata are.
3774 */
a36de779 3775 flow_tnl = flow->tunnel;
49a73e0c 3776 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
4e022ec0 3777 if (odp_port == ODPP_NONE) {
2d9b49dd 3778 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
9583bc14
EJ
3779 goto out; /* restore flow_nw_tos */
3780 }
c2b878e0
TLSC
3781 dst = flow_tnl_dst(&flow->tunnel);
3782 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
2d9b49dd 3783 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
9583bc14
EJ
3784 goto out; /* restore flow_nw_tos */
3785 }
3786 if (ctx->xin->resubmit_stats) {
46c88433 3787 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
9583bc14 3788 }
b256dc52
JS
3789 if (ctx->xin->xcache) {
3790 struct xc_entry *entry;
3791
3792 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
901a517e 3793 entry->dev.tx = netdev_ref(xport->netdev);
b256dc52 3794 }
9583bc14 3795 out_port = odp_port;
a36de779 3796 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
2d9b49dd 3797 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
081617f0 3798 is_native_tunnel = true;
a36de779 3799 } else {
2d9b49dd 3800 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
1520ef4f 3801 commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
a36de779
PS
3802 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3803 }
9583bc14 3804 } else {
46c88433 3805 odp_port = xport->odp_port;
7614e5d0 3806 out_port = odp_port;
9583bc14 3807 }
9583bc14 3808
4e022ec0 3809 if (out_port != ODPP_NONE) {
081617f0 3810 /* Commit accumulated flow updates before output. */
704bb0bf 3811 xlate_commit_actions(ctx);
adcf00ba 3812
e93ef1c7 3813 if (xr) {
081617f0 3814 /* Recirculate the packet. */
347bf289 3815 struct ovs_action_hash *act_hash;
adcf00ba 3816
347bf289 3817 /* Hash action. */
1520ef4f 3818 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
347bf289
AZ
3819 OVS_ACTION_ATTR_HASH,
3820 sizeof *act_hash);
3821 act_hash->hash_alg = xr->hash_alg;
62ac1f20 3822 act_hash->hash_basis = xr->hash_basis;
347bf289
AZ
3823
3824 /* Recirc action. */
1520ef4f 3825 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
347bf289 3826 xr->recirc_id);
081617f0
JS
3827 } else if (is_native_tunnel) {
3828 /* Output to native tunnel port. */
3829 build_tunnel_send(ctx, xport, flow, odp_port);
3830 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
a36de779 3831
081617f0
JS
3832 } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
3833 &odp_tnl_port)) {
3834 /* Intercept packet to be received on native tunnel port. */
3835 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
3836 odp_tnl_port);
a36de779 3837
081617f0
JS
3838 } else {
3839 /* Tunnel push-pop action is not compatible with
3840 * IPFIX action. */
3841 compose_ipfix_action(ctx, out_port);
3842
3843 /* Handle truncation of the mirrored packet. */
3844 if (ctx->mirror_snaplen > 0 &&
3845 ctx->mirror_snaplen < UINT16_MAX) {
3846 struct ovs_action_trunc *trunc;
3847
3848 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
3849 OVS_ACTION_ATTR_TRUNC,
3850 sizeof *trunc);
3851 trunc->max_len = ctx->mirror_snaplen;
3852 if (!ctx->xbridge->support.trunc) {
3853 ctx->xout->slow |= SLOW_ACTION;
1356dbd1
WT
3854 }
3855 }
081617f0
JS
3856
3857 nl_msg_put_odp_port(ctx->odp_actions,
3858 OVS_ACTION_ATTR_OUTPUT,
3859 out_port);
adcf00ba 3860 }
9583bc14 3861
6cbbf4fa
EJ
3862 ctx->sflow_odp_port = odp_port;
3863 ctx->sflow_n_outputs++;
2031ef97 3864 ctx->nf_output_iface = ofp_port;
6cbbf4fa
EJ
3865 }
3866
7efbc3b7
BP
3867 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
3868 mirror_packet(ctx, xport->xbundle,
3869 xbundle_mirror_dst(xport->xbundle->xbridge,
3870 xport->xbundle));
3871 }
3872
6cbbf4fa 3873 out:
9583bc14 3874 /* Restore flow */
f0fb825a 3875 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
33bf9176 3876 flow->nw_tos = flow_nw_tos;
5dbfe239
ZB
3877 flow->dl_dst = flow_dl_dst;
3878 flow->dl_src = flow_dl_src;
3879 flow->packet_type = flow_packet_type;
3880 flow->dl_type = flow_dl_type;
9583bc14
EJ
3881}
3882
3883static void
e93ef1c7
JR
3884compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3885 const struct xlate_bond_recirc *xr)
9583bc14 3886{
e93ef1c7 3887 compose_output_action__(ctx, ofp_port, xr, true);
9583bc14
EJ
3888}
3889
bb61b33d 3890static void
790c5d26 3891xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule, bool deepens)
bb61b33d
BP
3892{
3893 struct rule_dpif *old_rule = ctx->rule;
8b1e5560 3894 ovs_be64 old_cookie = ctx->rule_cookie;
dc723c44 3895 const struct rule_actions *actions;
bb61b33d
BP
3896
3897 if (ctx->xin->resubmit_stats) {
70742c7f 3898 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
bb61b33d
BP
3899 }
3900
98b07853 3901 ctx->resubmits++;
790c5d26 3902
790c5d26 3903 ctx->depth += deepens;
bb61b33d 3904 ctx->rule = rule;
07a3cd5c
BP
3905 ctx->rule_cookie = rule->up.flow_cookie;
3906 actions = rule_get_actions(&rule->up);
6f00e29b 3907 do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
8b1e5560 3908 ctx->rule_cookie = old_cookie;
bb61b33d 3909 ctx->rule = old_rule;
790c5d26 3910 ctx->depth -= deepens;
bb61b33d
BP
3911}
3912
bd3240ba
SH
3913static bool
3914xlate_resubmit_resource_check(struct xlate_ctx *ctx)
9583bc14 3915{
790c5d26 3916 if (ctx->depth >= MAX_DEPTH) {
2d9b49dd 3917 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
fff1b9c0 3918 ctx->error = XLATE_RECURSION_TOO_DEEP;
790c5d26 3919 } else if (ctx->resubmits >= MAX_RESUBMITS) {
2d9b49dd 3920 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
fff1b9c0 3921 ctx->error = XLATE_TOO_MANY_RESUBMITS;
1520ef4f 3922 } else if (ctx->odp_actions->size > UINT16_MAX) {
2d9b49dd 3923 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
fff1b9c0
JR
3924 /* NOT an error, as we'll be slow-pathing the flow in this case? */
3925 ctx->exit = true; /* XXX: translation still terminated! */
6fd6ed71 3926 } else if (ctx->stack.size >= 65536) {
2d9b49dd 3927 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
fff1b9c0 3928 ctx->error = XLATE_STACK_TOO_DEEP;
98b07853 3929 } else {
bd3240ba
SH
3930 return true;
3931 }
3932
3933 return false;
3934}
3935
2cd20955
JR
3936static void
3937tuple_swap_flow(struct flow *flow, bool ipv4)
3938{
3939 uint8_t nw_proto = flow->nw_proto;
3940 flow->nw_proto = flow->ct_nw_proto;
3941 flow->ct_nw_proto = nw_proto;
3942
3943 if (ipv4) {
3944 ovs_be32 nw_src = flow->nw_src;
3945 flow->nw_src = flow->ct_nw_src;
3946 flow->ct_nw_src = nw_src;
3947
3948 ovs_be32 nw_dst = flow->nw_dst;
3949 flow->nw_dst = flow->ct_nw_dst;
3950 flow->ct_nw_dst = nw_dst;
3951 } else {
3952 struct in6_addr ipv6_src = flow->ipv6_src;
3953 flow->ipv6_src = flow->ct_ipv6_src;
3954 flow->ct_ipv6_src = ipv6_src;
3955
3956 struct in6_addr ipv6_dst = flow->ipv6_dst;
3957 flow->ipv6_dst = flow->ct_ipv6_dst;
3958 flow->ct_ipv6_dst = ipv6_dst;
3959 }
3960
3961 ovs_be16 tp_src = flow->tp_src;
3962 flow->tp_src = flow->ct_tp_src;
3963 flow->ct_tp_src = tp_src;
3964
3965 ovs_be16 tp_dst = flow->tp_dst;
3966 flow->tp_dst = flow->ct_tp_dst;
3967 flow->ct_tp_dst = tp_dst;
3968}
3969
3970static void
3971tuple_swap(struct flow *flow, struct flow_wildcards *wc)
3972{
3973 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
3974
3975 tuple_swap_flow(flow, ipv4);
3976 tuple_swap_flow(&wc->masks, ipv4);
3977}
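
/* Worked example (illustrative only): for a tracked IPv4 TCP reply packet,
 * resubmit(,N,ct) applies the swap above so that table N matches nw_src,
 * nw_dst, tp_src, and tp_dst against the conntrack original-direction
 * 5-tuple (ct_nw_src, ct_nw_dst, ct_tp_src, ct_tp_dst), and the second
 * tuple_swap() call afterwards restores the packet's own headers. */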
3978
bd3240ba 3979static void
6d328fa2 3980xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
2cd20955
JR
3981 bool may_packet_in, bool honor_table_miss,
3982 bool with_ct_orig)
bd3240ba 3983{
e12ec36b
SH
3984 /* Check if we need to recirculate before matching in a table. */
3985 if (ctx->was_mpls) {
3986 ctx_trigger_freeze(ctx);
3987 return;
3988 }
bd3240ba 3989 if (xlate_resubmit_resource_check(ctx)) {
9583bc14 3990 uint8_t old_table_id = ctx->table_id;
3f207910 3991 struct rule_dpif *rule;
9583bc14
EJ
3992
3993 ctx->table_id = table_id;
3994
2cd20955
JR
3995 /* Swap packet fields with CT 5-tuple if requested. */
3996 if (with_ct_orig) {
3997 /* Do not swap if there is no CT tuple, or if key is not IP. */
3998 if (ctx->xin->flow.ct_nw_proto == 0 ||
3999 !is_ip_any(&ctx->xin->flow)) {
4000 xlate_report_error(ctx,
4001 "resubmit(ct) with non-tracked or non-IP packet!");
4002 return;
4003 }
4004 tuple_swap(&ctx->xin->flow, ctx->wc);
4005 }
34dd0d78 4006 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
1f4a8933 4007 ctx->xin->tables_version,
c0e638aa 4008 &ctx->xin->flow, ctx->wc,
34dd0d78
JR
4009 ctx->xin->resubmit_stats,
4010 &ctx->table_id, in_port,
a027899e
JR
4011 may_packet_in, honor_table_miss,
4012 ctx->xin->xcache);
2cd20955
JR
4013 /* Swap back. */
4014 if (with_ct_orig) {
4015 tuple_swap(&ctx->xin->flow, ctx->wc);
4016 }
ad3efdcb 4017
a2143702 4018 if (rule) {
83709dfa
JR
4019 /* Fill in the cache entry here instead of xlate_recursively
4020 * to make the reference counting more explicit. We take a
4021 * reference in the lookups above if we are going to cache the
4022 * rule. */
4023 if (ctx->xin->xcache) {
4024 struct xc_entry *entry;
4025
4026 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
901a517e 4027 entry->rule = rule;
07a3cd5c 4028 ofproto_rule_ref(&rule->up);
83709dfa 4029 }
2d9b49dd
BP
4030
4031 struct ovs_list *old_trace = ctx->xin->trace;
4032 xlate_report_table(ctx, rule, table_id);
790c5d26 4033 xlate_recursively(ctx, rule, table_id <= old_table_id);
2d9b49dd 4034 ctx->xin->trace = old_trace;
ad3efdcb
EJ
4035 }
4036
9583bc14 4037 ctx->table_id = old_table_id;
98b07853 4038 return;
9583bc14
EJ
4039 }
4040}
4041
76973237 4042/* Consumes the group reference, which is only taken if xcache exists. */
f4fb341b 4043static void
1e684d7d
RW
4044xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4045 struct ofputil_bucket *bucket)
4046{
4047 if (ctx->xin->resubmit_stats) {
4048 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4049 }
4050 if (ctx->xin->xcache) {
4051 struct xc_entry *entry;
4052
4053 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
901a517e
JR
4054 entry->group.group = group;
4055 entry->group.bucket = bucket;
1e684d7d
RW
4056 }
4057}
4058
4059static void
4060xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
f4fb341b
SH
4061{
4062 uint64_t action_list_stub[1024 / 8];
0a2869d5
BP
4063 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4064 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4065 bucket->ofpacts_len);
5b09e569 4066 struct flow old_flow = ctx->xin->flow;
e12ec36b 4067 bool old_was_mpls = ctx->was_mpls;
f4fb341b 4068
f4fb341b 4069 ofpacts_execute_action_set(&action_list, &action_set);
790c5d26 4070 ctx->depth++;
6fd6ed71 4071 do_xlate_actions(action_list.data, action_list.size, ctx);
790c5d26 4072 ctx->depth--;
f4fb341b 4073
f4fb341b 4074 ofpbuf_uninit(&action_list);
5b09e569 4075
77ab5fd2 4076 /* Check if need to freeze. */
1d361a81 4077 if (ctx->freezing) {
77ab5fd2 4078 finish_freezing(ctx);
e672ff9b
JR
4079 }
4080
5b09e569
JR
4081 /* Roll back flow to previous state.
4082 * This is equivalent to cloning the packet for each bucket.
4083 *
4084 * As a side effect any subsequently applied actions will
4085 * also effectively be applied to a clone of the packet taken
4086 * just before applying the all or indirect group.
4087 *
4088 * Note that group buckets are action sets, hence they cannot modify the
4089 * main action set. Also any stack actions are ignored when executing an
4090 * action set, so group buckets cannot change the stack either.
4091 * However, we do allow resubmit actions in group buckets, which could
4092 * break the above assumptions. It is up to the controller not to mess
4093 * with the action_set and stack in the tables resubmitted to from
4094 * group buckets. */
4095 ctx->xin->flow = old_flow;
4096
e12ec36b
SH
4097 /* The group bucket popping MPLS should have no effect after bucket
4098 * execution. */
4099 ctx->was_mpls = old_was_mpls;
4100
5b09e569
JR
4101 /* The fact that the group bucket exits (for any reason) does not mean that
4102 * the translation after the group action should exit. Specifically, if
1d361a81
BP
4103 * the group bucket freezes translation, the actions after the group action
4104 * must continue processing with the original, not the frozen packet! */
5b09e569 4105 ctx->exit = false;
f4fb341b
SH
4106}
4107
4108static void
4109xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
4110{
1e684d7d 4111 struct ofputil_bucket *bucket;
07a3cd5c 4112 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
f4fb341b 4113 xlate_group_bucket(ctx, bucket);
f4fb341b 4114 }
1e684d7d 4115 xlate_group_stats(ctx, group, NULL);
f4fb341b
SH
4116}
4117
dd8cd4b4
SH
4118static void
4119xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
4120{
1e684d7d 4121 struct ofputil_bucket *bucket;
dd8cd4b4
SH
4122
4123 bucket = group_first_live_bucket(ctx, group, 0);
4124 if (bucket) {
4125 xlate_group_bucket(ctx, bucket);
1e684d7d 4126 xlate_group_stats(ctx, group, bucket);
76973237 4127 } else if (ctx->xin->xcache) {
07a3cd5c 4128 ofproto_group_unref(&group->up);
dd8cd4b4
SH
4129 }
4130}
4131
fe7e5749 4132static void
7565c3e4 4133xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
fe7e5749 4134{
49a73e0c 4135 struct flow_wildcards *wc = ctx->wc;
1e684d7d 4136 struct ofputil_bucket *bucket;
fe7e5749
SH
4137 uint32_t basis;
4138
1d1aae0b 4139 basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
80e3509d 4140 flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
fe7e5749
SH
4141 bucket = group_best_live_bucket(ctx, group, basis);
4142 if (bucket) {
fe7e5749 4143 xlate_group_bucket(ctx, bucket);
1e684d7d 4144 xlate_group_stats(ctx, group, bucket);
76973237 4145 } else if (ctx->xin->xcache) {
07a3cd5c 4146 ofproto_group_unref(&group->up);
fe7e5749
SH
4147 }
4148}
4149
0c4b9393
SH
4150static void
4151xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4152{
07a3cd5c
BP
4153 const struct field_array *fields = &group->up.props.fields;
4154 const uint8_t *mask_values = fields->values;
4155 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
0c4b9393 4156
07a3cd5c 4157 size_t i;
e8dba719
JR
4158 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4159 const struct mf_field *mf = mf_from_id(i);
0c4b9393 4160
5bcd4754 4161 /* Skip fields for which prerequisites are not met. */
e8dba719
JR
4162 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4163 /* Skip the mask bytes for this field. */
4164 mask_values += mf->n_bytes;
4165 continue;
4166 }
0c4b9393 4167
e8dba719
JR
4168 union mf_value value;
4169 union mf_value mask;
0c4b9393 4170
e8dba719
JR
4171 mf_get_value(mf, &ctx->xin->flow, &value);
4172 /* Mask the value. */
4173 for (int j = 0; j < mf->n_bytes; j++) {
4174 mask.b[j] = *mask_values++;
4175 value.b[j] &= mask.b[j];
4176 }
4177 basis = hash_bytes(&value, mf->n_bytes, basis);
1cb20095 4178
e8dba719
JR
4179 /* For tunnels, hash in whether the field is present. */
4180 if (mf_is_tun_metadata(mf)) {
4181 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
0c4b9393 4182 }
e8dba719
JR
4183
4184 mf_mask_field_masked(mf, &mask, ctx->wc);
0c4b9393
SH
4185 }
4186
07a3cd5c 4187 struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
0c4b9393
SH
4188 if (bucket) {
4189 xlate_group_bucket(ctx, bucket);
4190 xlate_group_stats(ctx, group, bucket);
76973237 4191 } else if (ctx->xin->xcache) {
07a3cd5c 4192 ofproto_group_unref(&group->up);
0c4b9393
SH
4193 }
4194}
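
/* A minimal sketch (illustrative only, not part of the original source) of
 * the mask-then-hash step above for one field: the value is masked byte by
 * byte before being folded into the bucket-selection basis. Assumes fields
 * of at most 16 bytes for brevity. */
static uint32_t
select_group_hash_field_example(const uint8_t *value, const uint8_t *mask,
                                size_t n_bytes, uint32_t basis)
{
    uint8_t masked[16];

    ovs_assert(n_bytes <= sizeof masked);
    for (size_t j = 0; j < n_bytes; j++) {
        masked[j] = value[j] & mask[j];
    }
    return hash_bytes(masked, n_bytes, basis);
}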
4195
53cc166a
JR
4196static void
4197xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4198{
4199 struct ofputil_bucket *bucket;
4200
4201 /* dp_hash value 0 is special since it means that the dp_hash has not been
4202 * computed, as all computed dp_hash values are non-zero. Therefore a
4203 * comparison with zero can be used to decide whether the dp_hash value is
4204 * valid without masking the dp_hash field. */
4205 if (!ctx->xin->flow.dp_hash) {
07a3cd5c 4206 uint64_t param = group->up.props.selection_method_param;
53cc166a
JR
4207
4208 ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
4209 } else {
07a3cd5c 4210 uint32_t n_buckets = group->up.n_buckets;
53cc166a
JR
4211 if (n_buckets) {
4212 /* Minimal mask to cover the number of buckets. */
4213 uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
4214 * Multiplier chosen to make the trivial 1-bit case actually
4215 * distribute amongst two equal-weight buckets. */
4216 uint32_t basis = 0xc2b73583 * (ctx->xin->flow.dp_hash & mask);
4217
4218 ctx->wc->masks.dp_hash |= mask;
4219 bucket = group_best_live_bucket(ctx, group, basis);
4220 if (bucket) {
4221 xlate_group_bucket(ctx, bucket);
4222 xlate_group_stats(ctx, group, bucket);
4223 }
4224 }
4225 }
4226}
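
/* A minimal sketch (illustrative only, not part of the original source) of
 * the minimal-mask arithmetic above, equivalent to
 * (1 << log_2_ceil(n_buckets)) - 1.  For example, n_buckets == 5 yields
 * mask == 7, and the trivial n_buckets == 2 case yields mask == 1. */
static uint32_t
dp_hash_bucket_mask_example(uint32_t n_buckets)
{
    uint32_t bits = 0;

    while (n_buckets > (1u << bits)) {
        bits++;
    }
    return (1u << bits) - 1;
}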
4227
7565c3e4
SH
4228static void
4229xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4230{
07a3cd5c 4231 const char *selection_method = group->up.props.selection_method;
7565c3e4 4232
e12ec36b
SH
4233 /* Select groups may access flow keys beyond L2 in order to
4234 * select a bucket. Recirculate as appropriate to make this possible.
4235 */
4236 if (ctx->was_mpls) {
4237 ctx_trigger_freeze(ctx);
4238 }
4239
7565c3e4
SH
4240 if (selection_method[0] == '\0') {
4241 xlate_default_select_group(ctx, group);
0c4b9393
SH
4242 } else if (!strcasecmp("hash", selection_method)) {
4243 xlate_hash_fields_select_group(ctx, group);
53cc166a
JR
4244 } else if (!strcasecmp("dp_hash", selection_method)) {
4245 xlate_dp_hash_select_group(ctx, group);
7565c3e4
SH
4246 } else {
4247 /* Parsing of groups should ensure this never happens. */
4248 OVS_NOT_REACHED();
4249 }
4250}
4251
f4fb341b
SH
4252static void
4253xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
4254{
0eb48fe1 4255 bool was_in_group = ctx->in_group;
5a070238
BP
4256 ctx->in_group = true;
4257
07a3cd5c 4258 switch (group->up.type) {
f4fb341b
SH
4259 case OFPGT11_ALL:
4260 case OFPGT11_INDIRECT:
4261 xlate_all_group(ctx, group);
4262 break;
4263 case OFPGT11_SELECT:
fe7e5749 4264 xlate_select_group(ctx, group);
f4fb341b 4265 break;
dd8cd4b4
SH
4266 case OFPGT11_FF:
4267 xlate_ff_group(ctx, group);
4268 break;
f4fb341b 4269 default:
428b2edd 4270 OVS_NOT_REACHED();
f4fb341b 4271 }
5a070238 4272
0eb48fe1 4273 ctx->in_group = was_in_group;
f4fb341b
SH
4274}
4275
4276static bool
4277xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
4278{
0eb48fe1 4279 if (xlate_resubmit_resource_check(ctx)) {
f4fb341b 4280 struct group_dpif *group;
f4fb341b 4281
76973237
JR
4282 /* Take ref only if xcache exists. */
4283 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 4284 ctx->xin->tables_version, ctx->xin->xcache);
db88b35c
JR
4285 if (!group) {
4286 /* XXX: Should set ctx->error ? */
2d9b49dd
BP
4287 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4288 group_id);
f4fb341b
SH
4289 return true;
4290 }
db88b35c 4291 xlate_group_action__(ctx, group);
f4fb341b
SH
4292 }
4293
4294 return false;
4295}
4296
9583bc14
EJ
4297static void
4298xlate_ofpact_resubmit(struct xlate_ctx *ctx,
4299 const struct ofpact_resubmit *resubmit)
4300{
4e022ec0 4301 ofp_port_t in_port;
9583bc14 4302 uint8_t table_id;
adcf00ba
AZ
4303 bool may_packet_in = false;
4304 bool honor_table_miss = false;
4305
4306 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4307 /* Still allow missed packets to be sent to the controller
4308 * if resubmitting from an internal table. */
4309 may_packet_in = true;
4310 honor_table_miss = true;
4311 }
9583bc14
EJ
4312
4313 in_port = resubmit->in_port;
4314 if (in_port == OFPP_IN_PORT) {
4e022ec0 4315 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
4316 }
4317
4318 table_id = resubmit->table_id;
4319 if (table_id == 255) {
4320 table_id = ctx->table_id;
4321 }
4322
adcf00ba 4323 xlate_table_action(ctx, in_port, table_id, may_packet_in,
2cd20955 4324 honor_table_miss, resubmit->with_ct_orig);
9583bc14
EJ
4325}
4326
4327static void
4328flood_packets(struct xlate_ctx *ctx, bool all)
4329{
46c88433 4330 const struct xport *xport;
9583bc14 4331
46c88433
EJ
4332 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4333 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
4334 continue;
4335 }
4336
4337 if (all) {
e93ef1c7 4338 compose_output_action__(ctx, xport->ofp_port, NULL, false);
46c88433 4339 } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
e93ef1c7 4340 compose_output_action(ctx, xport->ofp_port, NULL);
9583bc14
EJ
4341 }
4342 }
4343
2031ef97 4344 ctx->nf_output_iface = NF_OUT_FLOOD;
9583bc14
EJ
4345}
4346
27d931da
AZ
4347/* Copy and reformat partially xlated odp actions to a new
4348 * odp actions list in 'b', so that the new actions list
4349 * can be executed by odp_execute_actions.
4350 *
4351 * When xlating using nested odp actions, such as sample and clone,
4352 * the nested action created by nl_msg_start_nested() may not
4353 * have been properly closed yet, and thus cannot be executed
4354 * directly.
4355 *
4356 * Since an unclosed nested action has to be the last action, it can be
4357 * fixed by skipping the outer header and treating the actions within
4358 * as if they were outside the nested attribute, since the effect
4359 * of executing them on the packet is the same.
4360 *
4361 * As an optimization, a fully closed 'sample' or 'clone' action
4362 * is skipped, since its execution has no effect on the packet.
4363 *
4364 * Returns true on success; 'b' then contains the new actions list.
4365 * The caller is responsible for disposing of 'b'.
4366 *
4367 * Returns false on error; 'b' has already been freed. */
4368static bool
4369xlate_fixup_actions(struct ofpbuf *b, const struct nlattr *actions,
4370 size_t actions_len)
4371{
4372 const struct nlattr *a;
4373 unsigned int left;
4374
4375 NL_ATTR_FOR_EACH_UNSAFE (a, left, actions, actions_len) {
4376 int type = nl_attr_type(a);
4377
4378 switch ((enum ovs_action_attr) type) {
4379 case OVS_ACTION_ATTR_HASH:
4380 case OVS_ACTION_ATTR_PUSH_VLAN:
4381 case OVS_ACTION_ATTR_POP_VLAN:
4382 case OVS_ACTION_ATTR_PUSH_MPLS:
4383 case OVS_ACTION_ATTR_POP_MPLS:
4384 case OVS_ACTION_ATTR_SET:
4385 case OVS_ACTION_ATTR_SET_MASKED:
4386 case OVS_ACTION_ATTR_TRUNC:
4387 case OVS_ACTION_ATTR_OUTPUT:
4388 case OVS_ACTION_ATTR_TUNNEL_PUSH:
4389 case OVS_ACTION_ATTR_TUNNEL_POP:
4390 case OVS_ACTION_ATTR_USERSPACE:
4391 case OVS_ACTION_ATTR_RECIRC:
4392 case OVS_ACTION_ATTR_CT:
0d11fc52
JR
4393 case OVS_ACTION_ATTR_PUSH_ETH:
4394 case OVS_ACTION_ATTR_POP_ETH:
1fc11c59
JS
4395 case OVS_ACTION_ATTR_ENCAP_NSH:
4396 case OVS_ACTION_ATTR_DECAP_NSH:
5dddf960 4397 case OVS_ACTION_ATTR_METER:
27d931da
AZ
4398 ofpbuf_put(b, a, nl_attr_len_pad(a, left));
4399 break;
4400
4401 case OVS_ACTION_ATTR_CLONE:
4402 /* If the clone action has been fully xlated, it can
4403 * be skipped, since any actions executed within the clone
4404 * do not affect the current packet.
4405 *
4406 * When xlating actions within a clone, the clone action,
4407 * because it is a nested netlink attribute, does not have
4408 * a valid 'nla_len'; it will be zero instead. Skip
4409 * the clone header to find the start of the actions
4410 * enclosed. Treat those actions as if they were written
4411 * outside of the clone. */
4412 if (!a->nla_len) {
4413 bool ok;
4414 if (left < NLA_HDRLEN) {
4415 goto error;
4416 }
4417
4418 ok = xlate_fixup_actions(b, nl_attr_get_unspec(a, 0),
4419 left - NLA_HDRLEN);
4420 if (!ok) {
4421 goto error;
4422 }
4423 }
4424 break;
4425
4426 case OVS_ACTION_ATTR_SAMPLE:
4427 if (!a->nla_len) {
4428 bool ok;
4429 if (left < NLA_HDRLEN) {
4430 goto error;
4431 }
4432 const struct nlattr *attr = nl_attr_get_unspec(a, 0);
4433 left -= NLA_HDRLEN;
4434
4435 while (left > 0 &&
4436 nl_attr_type(attr) != OVS_SAMPLE_ATTR_ACTIONS) {
4437 /* Only OVS_SAMPLE_ATTR_ACTIONS can have unclosed
4438 * nested netlink attribute. */
4439 if (!attr->nla_len) {
4440 goto error;
4441 }
4442
4443 left -= NLA_ALIGN(attr->nla_len);
4444 attr = nl_attr_next(attr);
4445 }
4446
4447 if (left < NLA_HDRLEN) {
4448 goto error;
4449 }
4450
4451 ok = xlate_fixup_actions(b, nl_attr_get_unspec(attr, 0),
4452 left - NLA_HDRLEN);
4453 if (!ok) {
4454 goto error;
4455 }
4456 }
4457 break;
4458
4459 case OVS_ACTION_ATTR_UNSPEC:
4460 case __OVS_ACTION_ATTR_MAX:
4461 OVS_NOT_REACHED();
4462 }
4463 }
4464
4465 return true;
4466
4467error:
4468 ofpbuf_delete(b);
4469 return false;
4470}
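
/* Example of the fixup above (illustrative only): a partially xlated list
 * whose last action is a still-open clone, e.g.
 *
 *   trunc(100), clone[ set(...), output(5)      <-- 'nla_len' still zero
 *
 * is flattened for execution into
 *
 *   trunc(100), set(...), output(5)
 *
 * while a fully closed clone(...) or sample(...) is dropped entirely, since
 * its nested actions cannot affect the current packet. */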
4471
4472static bool
4473xlate_execute_odp_actions(struct dp_packet *packet,
4474 const struct nlattr *actions, int actions_len)
4475{
4476 struct dp_packet_batch batch;
4477 struct ofpbuf *b = ofpbuf_new(actions_len);
4478
4479 if (!xlate_fixup_actions(b, actions, actions_len)) {
4480 return false;
4481 }
4482
4483 dp_packet_batch_init_packet(&batch, packet);
4484 odp_execute_actions(NULL, &batch, false, b->data, b->size, NULL);
4485 ofpbuf_delete(b);
4486
4487 return true;
4488}
4489
9583bc14
EJ
4490static void
4491execute_controller_action(struct xlate_ctx *ctx, int len,
4492 enum ofp_packet_in_reason reason,
bdcad671
BP
4493 uint16_t controller_id,
4494 const uint8_t *userdata, size_t userdata_len)
9583bc14 4495{
e14deea0 4496 struct dp_packet *packet;
9583bc14 4497
04594cd5 4498 ctx->xout->slow |= SLOW_CONTROLLER;
b476e2f2 4499 xlate_commit_actions(ctx);
9583bc14
EJ
4500 if (!ctx->xin->packet) {
4501 return;
4502 }
4503
df70a773
JR
4504 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4505 return;
4506 }
4507
cf62fa4c 4508 packet = dp_packet_clone(ctx->xin->packet);
27d931da
AZ
4509 if (!xlate_execute_odp_actions(packet, ctx->odp_actions->data,
4510 ctx->odp_actions->size)) {
4511 xlate_report_error(ctx, "Failed to execute controller action");
4512 dp_packet_delete(packet);
4513 return;
4514 }
beb75a40 4515
9bfe9334
BP
4516 /* A packet sent by an action in a table-miss rule is considered an
4517 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4518 * it will get translated back to OFPR_ACTION for those versions. */
4519 if (reason == OFPR_ACTION
07a3cd5c 4520 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
9bfe9334
BP
4521 reason = OFPR_EXPLICIT_MISS;
4522 }
4523
4524 size_t packet_len = dp_packet_size(packet);
0fb7792a 4525
a2b53dec
BP
4526 struct ofproto_async_msg *am = xmalloc(sizeof *am);
4527 *am = (struct ofproto_async_msg) {
9bfe9334 4528 .controller_id = controller_id,
a2b53dec
BP
4529 .oam = OAM_PACKET_IN,
4530 .pin = {
4531 .up = {
4d617a87 4532 .base = {
77ab5fd2
BP
4533 .packet = dp_packet_steal_data(packet),
4534 .packet_len = packet_len,
4535 .reason = reason,
4536 .table_id = ctx->table_id,
4537 .cookie = ctx->rule_cookie,
4538 .userdata = (userdata_len
4539 ? xmemdup(userdata, userdata_len)
4540 : NULL),
4541 .userdata_len = userdata_len,
4542 }
a2b53dec
BP
4543 },
4544 .max_len = len,
9bfe9334 4545 },
9bfe9334 4546 };
4d617a87 4547 flow_get_metadata(&ctx->xin->flow, &am->pin.up.base.flow_metadata);
9583bc14 4548
df70a773
JR
4549 /* Async messages are only sent once, so if we send one now, no
4550 * xlate cache entry is created. */
4551 if (ctx->xin->allow_side_effects) {
4552 ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
4553 } else /* xcache */ {
4554 struct xc_entry *entry;
4555
4556 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
4557 entry->controller.ofproto = ctx->xbridge->ofproto;
4558 entry->controller.am = am;
4559 }
3b4fff43
RM
4560
4561 dp_packet_delete(packet);
9583bc14
EJ
4562}
4563
7bbdd84f 4564static void
77ab5fd2 4565emit_continuation(struct xlate_ctx *ctx, const struct frozen_state *state)
7bbdd84f 4566{
df70a773
JR
4567 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4568 return;
4569 }
4570
77ab5fd2
BP
4571 struct ofproto_async_msg *am = xmalloc(sizeof *am);
4572 *am = (struct ofproto_async_msg) {
4573 .controller_id = ctx->pause->controller_id,
4574 .oam = OAM_PACKET_IN,
4575 .pin = {
4576 .up = {
4d617a87 4577 .base = {
77ab5fd2
BP
4578 .userdata = xmemdup(ctx->pause->userdata,
4579 ctx->pause->userdata_len),
4580 .userdata_len = ctx->pause->userdata_len,
4581 .packet = xmemdup(dp_packet_data(ctx->xin->packet),
4582 dp_packet_size(ctx->xin->packet)),
4583 .packet_len = dp_packet_size(ctx->xin->packet),
0b024e49 4584 .reason = ctx->pause->reason,
77ab5fd2 4585 },
07a3cd5c 4586 .bridge = ctx->xbridge->ofproto->uuid,
84cf3c1f
JR
4587 .stack = xmemdup(state->stack, state->stack_size),
4588 .stack_size = state->stack_size,
77ab5fd2
BP
4589 .mirrors = state->mirrors,
4590 .conntracked = state->conntracked,
4591 .actions = xmemdup(state->ofpacts, state->ofpacts_len),
4592 .actions_len = state->ofpacts_len,
4593 .action_set = xmemdup(state->action_set,
4594 state->action_set_len),
4595 .action_set_len = state->action_set_len,
4596 },
4597 .max_len = UINT16_MAX,
4598 },
4599 };
4d617a87 4600 flow_get_metadata(ctx->paused_flow, &am->pin.up.base.flow_metadata);
df70a773
JR
4601
4602 /* Async messages are only sent once, so if we send one now, no
4603 * xlate cache entry is created. */
4604 if (ctx->xin->allow_side_effects) {
4605 ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
4606 } else /* xcache */ {
4607 struct xc_entry *entry;
4608
4609 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
4610 entry->controller.ofproto = ctx->xbridge->ofproto;
4611 entry->controller.am = am;
4612 }
77ab5fd2 4613}
7bbdd84f 4614
e6bc8e74
YHW
4615/* Creates a frozen state and allocates a unique recirc id for the given
4616 * state. Returns a non-zero recirc id if it is allocated successfully.
4617 * Returns 0 otherwise.
4618 */
4619static uint32_t
77ab5fd2
BP
4620finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4621{
e6bc8e74 4622 uint32_t id = 0;
1d361a81 4623 ovs_assert(ctx->freezing);
7bbdd84f 4624
1d361a81 4625 struct frozen_state state = {
07659514 4626 .table_id = table,
07a3cd5c 4627 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5c1b2314 4628 .stack = ctx->stack.data,
84cf3c1f 4629 .stack_size = ctx->stack.size,
29bae541 4630 .mirrors = ctx->mirrors,
07659514 4631 .conntracked = ctx->conntracked,
1d361a81
BP
4632 .ofpacts = ctx->frozen_actions.data,
4633 .ofpacts_len = ctx->frozen_actions.size,
417509fa 4634 .action_set = ctx->action_set.data,
8a5fb3b4 4635 .action_set_len = ctx->action_set.size,
2082425c 4636 };
77ab5fd2 4637 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
2082425c 4638
77ab5fd2
BP
4639 if (ctx->pause) {
4640 if (ctx->xin->packet) {
4641 emit_continuation(ctx, &state);
4642 }
4643 } else {
4644 /* Allocate a unique recirc id for the given metadata state in the
4645 * flow. An existing id, with a new reference to the corresponding
4646 * recirculation context, will be returned if possible.
4647 * The life-cycle of this recirc id is managed by associating it
4648 * with the udpif key ('ukey') created for each new datapath flow. */
e6bc8e74 4649 id = recirc_alloc_id_ctx(&state);
77ab5fd2 4650 if (!id) {
2d9b49dd 4651 xlate_report_error(ctx, "Failed to allocate recirculation id");
77ab5fd2 4652 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
e6bc8e74 4653 return 0;
77ab5fd2
BP
4654 }
4655 recirc_refs_add(&ctx->xout->recircs, id);
7bbdd84f 4656
53cc166a
JR
4657 if (ctx->recirc_update_dp_hash) {
4658 struct ovs_action_hash *act_hash;
4659
4660 /* Hash action. */
4661 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4662 OVS_ACTION_ATTR_HASH,
4663 sizeof *act_hash);
4664 act_hash->hash_alg = OVS_HASH_ALG_L4; /* Make configurable. */
4665 act_hash->hash_basis = 0; /* Make configurable. */
4666 }
77ab5fd2
BP
4667 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
4668 }
e672ff9b 4669
1d361a81
BP
4670 /* Undo changes done by freezing. */
4671 ctx_cancel_freeze(ctx);
e6bc8e74 4672 return id;
7bbdd84f
SH
4673}
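
/* Illustrative note (not from the original source): when freezing ends in
 * recirculation rather than a continuation, the actions composed above show
 * up in a datapath flow dump roughly as
 *
 *     hash(hash_l4(0)),recirc(0x1f)
 *
 * assuming dp_hash was requested and recirc id 0x1f was allocated.  The
 * frozen state is looked up again by that id when the packet re-enters
 * translation. */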

/* Called only when we're freezing. */
static void
finish_freezing(struct xlate_ctx *ctx)
{
    xlate_commit_actions(ctx);
    finish_freezing__(ctx, 0);
}

/* Fork the pipeline here. The current packet will continue processing the
 * current action list. A clone of the current packet will recirculate, skip
 * the remainder of the current action list and asynchronously resume pipeline
 * processing in 'table' with the current metadata and action set. */
static void
compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table)
{
    uint32_t recirc_id;
    ctx->freezing = true;
    recirc_id = finish_freezing__(ctx, table);

    if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
        if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
                                    OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
                                    ctx->xin->packet, recirc_id)) {
            xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
                         "recirculate. The forked pipeline will be resumed at "
                         "table %u.", table);
        } else {
            xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
                         "forked pipeline with recirc_id = %d.", recirc_id);
        }
    }
}

static void
compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
    struct flow *flow = &ctx->xin->flow;
    int n;

    ovs_assert(eth_type_mpls(mpls->ethertype));

    n = flow_count_mpls_labels(flow, ctx->wc);
    if (!n) {
        xlate_commit_actions(ctx);
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet on which an MPLS push "
                               "action can't be performed as it would have "
                               "more MPLS LSEs than the %d supported.",
                               FLOW_MAX_MPLS_LABELS);
        }
        ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
        return;
    }

    /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
    flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
}

static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow *flow = &ctx->xin->flow;
    int n = flow_count_mpls_labels(flow, ctx->wc);

    if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
        if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
            ctx->was_mpls = true;
        }
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            xlate_report_error(ctx, "dropping packet on which an "
                               "MPLS pop action can't be performed as it has "
                               "more MPLS LSEs than the %d supported.",
                               FLOW_MAX_MPLS_LABELS);
        }
        ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
        ofpbuf_clear(ctx->odp_actions);
    }
}

static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

    ctx->wc->masks.nw_ttl = 0xff;
    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i], NULL, 0);
        }

        /* Stop processing for current table. */
        xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
                     flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
        return true;
    }
}

static void
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
        set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
    }
}

static void
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
        set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
    }
}

static void
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
    }
}

static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;

    if (eth_type_mpls(flow->dl_type)) {
        uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);

        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        if (ttl > 1) {
            ttl--;
            set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
            return false;
        } else {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
                                      NULL, 0);
        }
    }

    /* Stop processing for current table. */
    xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
    return true;
}

static void
xlate_output_action(struct xlate_ctx *ctx,
                    ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
    ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                           0, may_packet_in, true, false);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len,
                                  (ctx->in_packet_out ? OFPR_PACKET_OUT
                                   : ctx->in_group ? OFPR_GROUP
                                   : ctx->in_action_set ? OFPR_ACTION_SET
                                   : OFPR_ACTION),
                                  0, NULL, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            compose_output_action(ctx, port, NULL);
        } else {
            xlate_report(ctx, OFT_WARN, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
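
/* Summary (added note): the merge logic above tracks the NetFlow output
 * interface across multiple outputs in one action list:
 *
 *     previous          this output       resulting nf_output_iface
 *     ----------------  ----------------  --------------------------
 *     NF_OUT_FLOOD      anything          NF_OUT_FLOOD
 *     NF_OUT_DROP       one port / flood  that port / NF_OUT_FLOOD
 *     one port          nothing (drop)    the previous port
 *     one port          another port      NF_OUT_MULTI
 *
 * so NetFlow records name a single egress interface only when exactly one
 * port was chosen. */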

static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);

        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
        xlate_output_action(ctx, u16_to_ofp(port), or->max_len, false);
    } else {
        xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
                     port);
    }
}

static void
xlate_output_trunc_action(struct xlate_ctx *ctx,
                          ofp_port_t port, uint32_t max_len)
{
    bool support_trunc = ctx->xbridge->support.trunc;
    struct ovs_action_trunc *trunc;
    char name[OFP10_MAX_PORT_NAME_LEN];

    switch (port) {
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_CONTROLLER:
    case OFPP_NONE:
        ofputil_port_to_string(port, NULL, name, sizeof name);
        xlate_report(ctx, OFT_WARN,
                     "output_trunc does not support port: %s", name);
        break;
    case OFPP_LOCAL:
    case OFPP_IN_PORT:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            const struct xport *xport = get_ofp_port(ctx->xbridge, port);

            if (xport == NULL || xport->odp_port == ODPP_NONE) {
                /* Since truncation happens at the output action that follows
                 * it, the behavior is somewhat unpredictable if the output
                 * port is a patch port.  For simplicity, disallow this
                 * case. */
                ofputil_port_to_string(port, NULL, name, sizeof name);
                xlate_report_error(ctx, "output_trunc does not support "
                                   "patch port %s", name);
                break;
            }

            trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                             OVS_ACTION_ATTR_TRUNC,
                                             sizeof *trunc);
            trunc->max_len = max_len;
            xlate_output_action(ctx, port, max_len, false);
            if (!support_trunc) {
                ctx->xout->slow |= SLOW_ACTION;
            }
        } else {
            xlate_report(ctx, OFT_WARN, "skipping output to input port");
        }
        break;
    }
}

static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port, NULL);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

static bool
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
{
    const struct xbridge *xbridge = xbridge_;
    struct xport *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(xbridge, ofp_port);
        return port ? port->may_enable : false;
    }
}

static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    ofp_port_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
                          CONST_CAST(struct xbridge *, ctx->xbridge));
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
        xlate_report_subfield(ctx, &bundle->dst);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

5047
4165b5e0
JS
5048static void
5049xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5050{
49a73e0c 5051 learn_mask(learn, ctx->wc);
9583bc14 5052
df70a773 5053 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4165b5e0
JS
5054 uint64_t ofpacts_stub[1024 / 8];
5055 struct ofputil_flow_mod fm;
2c7ee524 5056 struct ofproto_flow_mod ofm__, *ofm;
4165b5e0 5057 struct ofpbuf ofpacts;
2c7ee524
JR
5058 enum ofperr error;
5059
5060 if (ctx->xin->xcache) {
3f3b97b0 5061 ofm = xmalloc(sizeof *ofm);
2c7ee524
JR
5062 } else {
5063 ofm = &ofm__;
5064 }
4165b5e0
JS
5065
5066 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
2c7ee524 5067 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
2d9b49dd
BP
5068 if (OVS_UNLIKELY(ctx->xin->trace)) {
5069 struct ds s = DS_EMPTY_INITIALIZER;
5070 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
50f96b10 5071 match_format(&fm.match, NULL, &s, OFP_DEFAULT_PRIORITY);
2d9b49dd
BP
5072 ds_chomp(&s, ' ');
5073 ds_put_format(&s, " priority=%d", fm.priority);
5074 if (fm.new_cookie) {
5075 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5076 }
5077 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5078 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5079 }
5080 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5081 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5082 }
5083 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5084 ds_put_cstr(&s, " send_flow_rem");
5085 }
5086 ds_put_cstr(&s, " actions=");
50f96b10 5087 ofpacts_format(fm.ofpacts, fm.ofpacts_len, NULL, &s);
2d9b49dd
BP
5088 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5089 ds_destroy(&s);
5090 }
2c7ee524
JR
5091 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5092 &fm, ofm);
4165b5e0 5093 ofpbuf_uninit(&ofpacts);
2c7ee524 5094
3f3b97b0 5095 if (!error) {
4c71600d 5096 bool success = true;
3f3b97b0 5097 if (ctx->xin->allow_side_effects) {
4c71600d
DDP
5098 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5099 learn->limit, &success);
5100 } else if (learn->limit) {
5101 if (!ofm->temp_rule
5102 || ofm->temp_rule->state != RULE_INSERTED) {
5103 /* The learned rule expired and there are no packets, so
5104 * we cannot learn again. Since the translated actions
5105 * depend on the result of learning, we tell the caller
5106 * that there's no point in caching this result. */
5107 ctx->xout->avoid_caching = true;
5108 }
3f3b97b0
DDP
5109 }
5110
4c71600d
DDP
5111 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5112 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5113 &ctx->xin->flow, ctx->wc);
5114 xlate_report_subfield(ctx, &learn->result_dst);
5115 }
5116
5117 if (success && ctx->xin->xcache) {
3f3b97b0
DDP
5118 struct xc_entry *entry;
5119
5120 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5121 entry->learn.ofm = ofm;
4c71600d 5122 entry->learn.limit = learn->limit;
3f3b97b0
DDP
5123 ofm = NULL;
5124 }
4c71600d
DDP
5125
5126 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5127 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5128 }
3f3b97b0
DDP
5129 }
5130
5131 if (ctx->xin->xcache) {
5132 free(ofm);
2c7ee524
JR
5133 }
5134
5135 if (error) {
2d9b49dd
BP
5136 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5137 ofperr_to_string(error));
2c7ee524 5138 }
2d9b49dd
BP
5139 } else {
5140 xlate_report(ctx, OFT_WARN,
5141 "suppressing side effects, so learn action ignored");
b256dc52
JS
5142 }
5143}
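
/* Illustrative example (not from the original source): for a typical MAC
 * learning rule, the OFT_DETAIL report built above reads roughly
 *
 *     table=1 dl_dst=50:54:00:00:00:01 priority=32768 hard=300
 *         actions=load:0x1->NXM_NX_REG0[0..15]
 *
 * i.e. the concrete flow_mod produced by learn_execute() for this packet,
 * with fields already instantiated, not the learn action template itself. */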

static void
xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
                    uint16_t idle_timeout, uint16_t hard_timeout)
{
    if (tcp_flags & (TCP_FIN | TCP_RST)) {
        ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
    }
}

static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->rule) {
        if (ctx->xin->allow_side_effects) {
            xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
                                oft->fin_idle_timeout, oft->fin_hard_timeout);
        }
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
            /* XC_RULE already holds a reference on the rule, none is taken
             * here. */
            entry->fin.rule = ctx->rule;
            entry->fin.idle = oft->fin_idle_timeout;
            entry->fin.hard = oft->fin_hard_timeout;
        }
    }
}

static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    odp_port_t output_odp_port = ODPP_NONE;
    odp_port_t tunnel_out_port = ODPP_NONE;
    struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
    bool emit_set_tunnel = false;

    if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
        return;
    }

    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;
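    /* Added note: replicating the 16-bit probability into both halves of the
     * 32-bit word preserves the fraction exactly, since for any p,
     * p / 0xffff == ((p << 16) | p) / 0xffffffff.  For example,
     * p = 0x8000 (about 50% of UINT16_MAX) scales to 0x80008000, about 50%
     * of UINT32_MAX. */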

    if (!ctx->xbridge->support.variable_length_userdata) {
        xlate_report_error(ctx, "ignoring NXAST_SAMPLE action because "
                           "datapath lacks support (needs Linux 3.10+ or "
                           "kernel module from OVS 1.11+)");
        return;
    }

    /* If the sampling port in the flow sample action is equal to the input
     * ofp_port, this sample action is an input port action. */
    if (os->sampling_port != OFPP_NONE &&
        os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
        output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
                                               os->sampling_port);
        if (output_odp_port == ODPP_NONE) {
            xlate_report_error(ctx, "can't use unknown port %d in flow sample "
                               "action", os->sampling_port);
            return;
        }

        if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
                                                         os->collector_set_id)
            && dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
            tunnel_out_port = output_odp_port;
            emit_set_tunnel = true;
        }
    }

    xlate_commit_actions(ctx);
    /* If 'emit_set_tunnel' is set, sample(sampling_port=1) translates into
     * the datapath actions set(tunnel(...)),sample(...), which is used for
     * sampling egress tunnel information. */
    if (emit_set_tunnel) {
        const struct xport *xport = get_ofp_port(ctx->xbridge,
                                                 os->sampling_port);

        if (xport && xport->is_tunnel) {
            struct flow *flow = &ctx->xin->flow;
            tnl_port_send(xport->ofport, flow, ctx->wc);
            if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
                struct flow_tnl flow_tnl = flow->tunnel;

                commit_odp_tunnel_action(flow, &ctx->base_flow,
                                         ctx->odp_actions);
                flow->tunnel = flow_tnl;
            }
        } else {
            xlate_report_error(ctx,
                               "sampling_port:%d should be a tunnel port.",
                               os->sampling_port);
        }
    }

    union user_action_cookie cookie = {
        .flow_sample = {
            .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
            .probability = os->probability,
            .collector_set_id = os->collector_set_id,
            .obs_domain_id = os->obs_domain_id,
            .obs_point_id = os->obs_point_id,
            .output_odp_port = output_odp_port,
            .direction = os->direction,
        }
    };
    compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
                          tunnel_out_port, false);
}

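/* Illustrative example (not from the original source): with a 50% sample
 * action, the composed datapath action is rendered by the dpctl tools along
 * the lines of
 *
 *     sample(sample=50.0%,actions(userspace(pid=...,flow_sample(...))))
 *
 * where the userspace action carries the IPFIX cookie built above.  The
 * exact rendering depends on the odp-util version. */
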
/* Determine if a datapath action translated from an OpenFlow action
 * can be reversed by another datapath action.
 *
 * OpenFlow actions that do not emit datapath actions are trivially
 * reversible.  Reversibility of other actions depends on the nature of the
 * action and its translation. */
static bool
reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
{
    const struct ofpact *a;

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        switch (a->type) {
        case OFPACT_BUNDLE:
        case OFPACT_CLEAR_ACTIONS:
        case OFPACT_CLONE:
        case OFPACT_CONJUNCTION:
        case OFPACT_CONTROLLER:
        case OFPACT_CT_CLEAR:
        case OFPACT_DEBUG_RECIRC:
        case OFPACT_DEC_MPLS_TTL:
        case OFPACT_DEC_TTL:
        case OFPACT_ENQUEUE:
        case OFPACT_EXIT:
        case OFPACT_FIN_TIMEOUT:
        case OFPACT_GOTO_TABLE:
        case OFPACT_GROUP:
        case OFPACT_LEARN:
        case OFPACT_MULTIPATH:
        case OFPACT_NOTE:
        case OFPACT_OUTPUT:
        case OFPACT_OUTPUT_REG:
        case OFPACT_POP_MPLS:
        case OFPACT_POP_QUEUE:
        case OFPACT_PUSH_MPLS:
        case OFPACT_PUSH_VLAN:
        case OFPACT_REG_MOVE:
        case OFPACT_RESUBMIT:
        case OFPACT_SAMPLE:
        case OFPACT_SET_ETH_DST:
        case OFPACT_SET_ETH_SRC:
        case OFPACT_SET_FIELD:
        case OFPACT_SET_IP_DSCP:
        case OFPACT_SET_IP_ECN:
        case OFPACT_SET_IP_TTL:
        case OFPACT_SET_IPV4_DST:
        case OFPACT_SET_IPV4_SRC:
        case OFPACT_SET_L4_DST_PORT:
        case OFPACT_SET_L4_SRC_PORT:
        case OFPACT_SET_MPLS_LABEL:
        case OFPACT_SET_MPLS_TC:
        case OFPACT_SET_MPLS_TTL:
        case OFPACT_SET_QUEUE:
        case OFPACT_SET_TUNNEL:
        case OFPACT_SET_VLAN_PCP:
        case OFPACT_SET_VLAN_VID:
        case OFPACT_STACK_POP:
        case OFPACT_STACK_PUSH:
        case OFPACT_STRIP_VLAN:
        case OFPACT_UNROLL_XLATE:
        case OFPACT_WRITE_ACTIONS:
        case OFPACT_WRITE_METADATA:
            break;

        case OFPACT_CT:
        case OFPACT_METER:
        case OFPACT_NAT:
        case OFPACT_OUTPUT_TRUNC:
        case OFPACT_ENCAP:
        case OFPACT_DECAP:
            return false;
        }
    }
    return true;
}

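/* Added note: the distinction matters because the clone code below can
 * translate reversible action lists in place and simply restore the flow
 * afterwards.  Actions in the second group have side effects beyond the
 * packet headers themselves (e.g. OFPACT_CT may commit connection state,
 * OFPACT_METER updates band counters), so their effect cannot be undone by
 * restoring the flow, and a real datapath clone is required instead. */
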
static void
clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
                    struct xlate_ctx *ctx)
{
    struct ofpbuf old_stack = ctx->stack;
    union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
    ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
    ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);

    struct ofpbuf old_action_set = ctx->action_set;
    uint64_t actset_stub[1024 / 8];
    ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
    ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);

    size_t offset, ac_offset;
    struct flow old_flow = ctx->xin->flow;

    if (reversible_actions(actions, actions_len)) {
        old_flow = ctx->xin->flow;
        do_xlate_actions(actions, actions_len, ctx);
        if (ctx->freezing) {
            finish_freezing(ctx);
        }
        goto xlate_done;
    }

    /* Commit datapath actions before emitting the clone action to
     * avoid emitting those actions twice:  once inside
     * the clone, another time for the action after the clone. */
    xlate_commit_actions(ctx);
    struct flow old_base = ctx->base_flow;
    bool old_was_mpls = ctx->was_mpls;
    bool old_conntracked = ctx->conntracked;

    /* The actions are not reversible, so a datapath clone action is
     * required to encode the translation.  Select the clone action
     * based on datapath capabilities. */
    if (ctx->xbridge->support.clone) {
        /* Use clone action as datapath clone. */
        offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
        do_xlate_actions(actions, actions_len, ctx);
        if (ctx->freezing) {
            finish_freezing(ctx);
        }
        nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
        goto dp_clone_done;
    }

    if (ctx->xbridge->support.sample_nesting > 3) {
        /* Use sample action as datapath clone. */
        offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
        ac_offset = nl_msg_start_nested(ctx->odp_actions,
                                        OVS_SAMPLE_ATTR_ACTIONS);
        do_xlate_actions(actions, actions_len, ctx);
        if (ctx->freezing) {
            finish_freezing(ctx);
        }
        if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
            nl_msg_cancel_nested(ctx->odp_actions, offset);
        } else {
            nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
                           UINT32_MAX); /* 100% probability. */
            nl_msg_end_nested(ctx->odp_actions, offset);
        }
        goto dp_clone_done;
    }

    /* The datapath supports neither encoding; skip translating the nested
     * actions and report an error. */
    xlate_report_error(ctx, "Failed to compose clone action");

dp_clone_done:
    /* The clone's conntrack execution should have no effect on the original
     * packet. */
    ctx->conntracked = old_conntracked;

    /* Popping MPLS from the clone should have no effect on the original
     * packet. */
    ctx->was_mpls = old_was_mpls;

    /* Restore the 'base_flow' for the next action. */
    ctx->base_flow = old_base;

xlate_done:
    ofpbuf_uninit(&ctx->action_set);
    ctx->action_set = old_action_set;
    ofpbuf_uninit(&ctx->stack);
    ctx->stack = old_stack;
    ctx->xin->flow = old_flow;
}
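
/* Illustrative sketch (not from the original source): on a datapath with
 * clone support the nested actions are emitted as clone(...); without it,
 * but with sufficient sample nesting depth, they are wrapped as
 *
 *     sample(sample=100.0%,actions(...))
 *
 * which is semantically a clone, because the probability is UINT32_MAX and
 * so the nested actions always run on a copy of the packet. */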

static void
compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc)
{
    size_t oc_actions_len = ofpact_nest_get_action_len(oc);

    clone_xlate_actions(oc->actions, oc_actions_len, ctx);
}

static void
xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
{
    if (meter->provider_meter_id != UINT32_MAX) {
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
                       meter->provider_meter_id);
    }
}

static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
    if (xport->config & (is_stp(&ctx->xin->flow)
                         ? OFPUTIL_PC_NO_RECV_STP
                         : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
        (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
        return false;
    }

    return true;
}

static void
xlate_write_actions__(struct xlate_ctx *ctx,
                      const struct ofpact *ofpacts, size_t ofpacts_len)
{
    /* Maintain actset_output depending on the contents of the action set:
     *
     *   - OFPP_UNSET, if there is no "output" action.
     *
     *   - The output port, if there is an "output" action and no "group"
     *     action.
     *
     *   - OFPP_UNSET, if there is a "group" action.
     */
    if (!ctx->action_set_has_group) {
        const struct ofpact *a;
        OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
            if (a->type == OFPACT_OUTPUT) {
                ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
            } else if (a->type == OFPACT_GROUP) {
                ctx->xin->flow.actset_output = OFPP_UNSET;
                ctx->action_set_has_group = true;
                break;
            }
        }
    }

    ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
}

static void
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
{
    xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
}

static void
xlate_action_set(struct xlate_ctx *ctx)
{
    uint64_t action_list_stub[1024 / 8];
    struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
    ofpacts_execute_action_set(&action_list, &ctx->action_set);
    /* Clear the action set, as it is not needed any more. */
    ofpbuf_clear(&ctx->action_set);
    if (action_list.size) {
        ctx->in_action_set = true;

        struct ovs_list *old_trace = ctx->xin->trace;
        ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
                                       "--. Executing action set:");
        do_xlate_actions(action_list.data, action_list.size, ctx);
        ctx->xin->trace = old_trace;

        ctx->in_action_set = false;
    }
    ofpbuf_uninit(&action_list);
}

static void
freeze_put_unroll_xlate(struct xlate_ctx *ctx)
{
    struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;

    /* Restore the table_id and rule cookie for a potential PACKET
     * IN if needed. */
    if (!unroll ||
        (ctx->table_id != unroll->rule_table_id
         || ctx->rule_cookie != unroll->rule_cookie)) {
        unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
        unroll->rule_table_id = ctx->table_id;
        unroll->rule_cookie = ctx->rule_cookie;
        ctx->frozen_actions.header = unroll;
    }
}


/* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
 * executed after thawing.  Inserts an UNROLL_XLATE action, if none is already
 * present, before any action that may depend on the current table ID or flow
 * cookie. */
static void
freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
                      struct xlate_ctx *ctx)
{
    for (; a < end; a = ofpact_next(a)) {
        switch (a->type) {
        case OFPACT_OUTPUT_REG:
        case OFPACT_OUTPUT_TRUNC:
        case OFPACT_GROUP:
        case OFPACT_OUTPUT:
        case OFPACT_CONTROLLER:
        case OFPACT_DEC_MPLS_TTL:
        case OFPACT_DEC_TTL:
            /* These actions may generate asynchronous messages, which include
             * table ID and flow cookie information. */
            freeze_put_unroll_xlate(ctx);
            break;

        case OFPACT_RESUBMIT:
            if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
                /* This resubmit action is relative to the current table, so we
                 * need to track what table that is. */
                freeze_put_unroll_xlate(ctx);
            }
            break;

        case OFPACT_SET_TUNNEL:
        case OFPACT_REG_MOVE:
        case OFPACT_SET_FIELD:
        case OFPACT_STACK_PUSH:
        case OFPACT_STACK_POP:
        case OFPACT_LEARN:
        case OFPACT_WRITE_METADATA:
        case OFPACT_GOTO_TABLE:
        case OFPACT_ENQUEUE:
        case OFPACT_SET_VLAN_VID:
        case OFPACT_SET_VLAN_PCP:
        case OFPACT_STRIP_VLAN:
        case OFPACT_PUSH_VLAN:
        case OFPACT_SET_ETH_SRC:
        case OFPACT_SET_ETH_DST:
        case OFPACT_SET_IPV4_SRC:
        case OFPACT_SET_IPV4_DST:
        case OFPACT_SET_IP_DSCP:
        case OFPACT_SET_IP_ECN:
        case OFPACT_SET_IP_TTL:
        case OFPACT_SET_L4_SRC_PORT:
        case OFPACT_SET_L4_DST_PORT:
        case OFPACT_SET_QUEUE:
        case OFPACT_POP_QUEUE:
        case OFPACT_PUSH_MPLS:
        case OFPACT_POP_MPLS:
        case OFPACT_SET_MPLS_LABEL:
        case OFPACT_SET_MPLS_TC:
        case OFPACT_SET_MPLS_TTL:
        case OFPACT_MULTIPATH:
        case OFPACT_BUNDLE:
        case OFPACT_EXIT:
        case OFPACT_UNROLL_XLATE:
        case OFPACT_FIN_TIMEOUT:
        case OFPACT_CLEAR_ACTIONS:
        case OFPACT_WRITE_ACTIONS:
        case OFPACT_METER:
        case OFPACT_SAMPLE:
        case OFPACT_CLONE:
        case OFPACT_ENCAP:
        case OFPACT_DECAP:
        case OFPACT_DEBUG_RECIRC:
        case OFPACT_CT:
        case OFPACT_CT_CLEAR:
        case OFPACT_NAT:
            /* These may not generate PACKET INs. */
            break;

        case OFPACT_NOTE:
        case OFPACT_CONJUNCTION:
            /* These need not be copied for restoration. */
            continue;
        }
        /* Copy the action over. */
        ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
    }
}
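
/* Added note: the effect is that a frozen action list such as
 *
 *     ...controller(reason=invalid_ttl)...
 *
 * is stored as "unroll_xlate(table=N, cookie=C), controller(...)", so that
 * when translation is thawed after recirculation the PACKET_IN still
 * reports the table ID and cookie of the rule that was executing when the
 * freeze happened. */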

static void
put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
            struct flow_wildcards *wc)
{
    if (wc->masks.ct_mark) {
        struct {
            uint32_t key;
            uint32_t mask;
        } *odp_ct_mark;

        odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
                                               sizeof(*odp_ct_mark));
        odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
        odp_ct_mark->mask = wc->masks.ct_mark;
    }
}

static void
put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
             struct flow_wildcards *wc)
{
    if (!ovs_u128_is_zero(wc->masks.ct_label)) {
        struct {
            ovs_u128 key;
            ovs_u128 mask;
        } odp_ct_label;

        odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
        odp_ct_label.mask = wc->masks.ct_label;
        nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
                          &odp_ct_label, sizeof odp_ct_label);
    }
}

static void
put_ct_helper(struct xlate_ctx *ctx,
              struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
{
    if (ofc->alg) {
        switch (ofc->alg) {
        case IPPORT_FTP:
            nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
            break;
        case IPPORT_TFTP:
            nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
            break;
        default:
            xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
            break;
        }
    }
}

static void
put_ct_nat(struct xlate_ctx *ctx)
{
    struct ofpact_nat *ofn = ctx->ct_nat_action;
    size_t nat_offset;

    if (!ofn) {
        return;
    }

    nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
    if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
        nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
                        ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
        if (ofn->flags & NX_NAT_F_PERSISTENT) {
            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
        }
        if (ofn->flags & NX_NAT_F_PROTO_HASH) {
            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
        } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
        }
        if (ofn->range_af == AF_INET) {
            nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
                            ofn->range.addr.ipv4.min);
            if (ofn->range.addr.ipv4.max &&
                (ntohl(ofn->range.addr.ipv4.max)
                 > ntohl(ofn->range.addr.ipv4.min))) {
                nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
                                ofn->range.addr.ipv4.max);
            }
        } else if (ofn->range_af == AF_INET6) {
            nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
                              &ofn->range.addr.ipv6.min,
                              sizeof ofn->range.addr.ipv6.min);
            if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
                memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
                       sizeof ofn->range.addr.ipv6.max) > 0) {
                nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
                                  &ofn->range.addr.ipv6.max,
                                  sizeof ofn->range.addr.ipv6.max);
            }
        }
        if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
            nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
                           ofn->range.proto.min);
            if (ofn->range.proto.max &&
                ofn->range.proto.max > ofn->range.proto.min) {
                nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
                               ofn->range.proto.max);
            }
        }
    }
    nl_msg_end_nested(ctx->odp_actions, nat_offset);
}

static void
compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc)
{
    ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
    uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
    size_t ct_offset;
    uint16_t zone;

    /* Ensure that any prior actions are applied before composing the new
     * conntrack action. */
    xlate_commit_actions(ctx);

    /* Process nested actions first, to populate the key. */
    ctx->ct_nat_action = NULL;
    ctx->wc->masks.ct_mark = 0;
    ctx->wc->masks.ct_label = OVS_U128_ZERO;
    do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx);

    if (ofc->zone_src.field) {
        zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
    } else {
        zone = ofc->zone_imm;
    }

    ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
    if (ofc->flags & NX_CT_F_COMMIT) {
        nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
                        OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
        if (ctx->xbridge->support.ct_eventmask) {
            nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
                           OVS_CT_EVENTMASK_DEFAULT);
        }
    }
    nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
    put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
    put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
    put_ct_helper(ctx, ctx->odp_actions, ofc);
    put_ct_nat(ctx);
    ctx->ct_nat_action = NULL;
    nl_msg_end_nested(ctx->odp_actions, ct_offset);

    ctx->wc->masks.ct_mark = old_ct_mark_mask;
    ctx->wc->masks.ct_label = old_ct_label_mask;

    if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
        ctx->conntracked = true;
        compose_recirculate_and_fork(ctx, ofc->recirc_table);
    }

    /* The ct_* fields are only available in the scope of the 'recirc_table'
     * call chain. */
    flow_clear_conntrack(&ctx->xin->flow);
    ctx->conntracked = false;
}

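/* Illustrative example (not from the original source): an OpenFlow action
 * like ct(commit,zone=5,exec(set_field:1->ct_mark),table=4) composes
 * datapath actions along the lines of
 *
 *     ct(commit,zone=5,mark=0x1/0xffffffff),recirc(0x1c)
 *
 * where the recirc id resumes the pipeline at table 4 with the conntrack
 * fields populated. */
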
static void
rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
                            struct flow *flow,
                            struct flow_wildcards *wc)
{
    wc->masks.packet_type = OVS_BE32_MAX;
    if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
        /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
        ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
        flow->packet_type = htonl(PT_ETH);
        flow->dl_src = eth_addr_zero;
        flow->dl_dst = eth_addr_zero;
        flow->dl_type = ethertype;
    } else {
        /* Error handling: drop packet. */
        xlate_report_debug(ctx, OFT_ACTION,
                           "Dropping packet as encap(ethernet) is not "
                           "supported for packet type ethernet.");
        ctx->error = 1;
    }
}

/* For an MD2 NSH header returns a pointer to an ofpbuf with the encoded
 * MD2 TLVs provided as encap properties to the encap operation. This
 * will be stored as encap_data in the ctx and copied into the encap_nsh
 * action at the next commit. */
static struct ofpbuf *
rewrite_flow_encap_nsh(struct xlate_ctx *ctx,
                       const struct ofpact_encap *encap,
                       struct flow *flow,
                       struct flow_wildcards *wc)
{
    ovs_be32 packet_type = flow->packet_type;
    const char *ptr = (char *) encap->props;
    struct ofpbuf *buf = ofpbuf_new(OVS_ENCAP_NSH_MAX_MD_LEN);
    uint8_t md_type = NSH_M_TYPE1;
    uint8_t np = 0;
    int i;

    /* Scan the optional NSH encap TLV properties, if any. */
    for (i = 0; i < encap->n_props; i++) {
        struct ofpact_ed_prop *prop_ptr =
            ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
        if (prop_ptr->prop_class == OFPPPC_NSH) {
            switch (prop_ptr->type) {
            case OFPPPT_PROP_NSH_MDTYPE: {
                struct ofpact_ed_prop_nsh_md_type *prop_md_type =
                    ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
                                 prop_ptr);
                md_type = prop_md_type->md_type;
                break;
            }
            case OFPPPT_PROP_NSH_TLV: {
                struct ofpact_ed_prop_nsh_tlv *tlv_prop =
                    ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
                                 prop_ptr);
                struct nsh_md2_tlv *md2_ctx =
                    ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
                md2_ctx->md_class = tlv_prop->tlv_class;
                md2_ctx->type = tlv_prop->tlv_type;
                md2_ctx->length = tlv_prop->tlv_len;
                size_t len = ROUND_UP(md2_ctx->length, 4);
                size_t padding = len - md2_ctx->length;
                ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
                ofpbuf_put_zeros(buf, padding);
                break;
            }
            default:
                /* No other NSH encap properties defined yet. */
                break;
            }
        }
        ptr += ROUND_UP(prop_ptr->len, 8);
    }
    if (buf->size == 0 || buf->size > OVS_ENCAP_NSH_MAX_MD_LEN) {
        ofpbuf_delete(buf);
        buf = NULL;
    }

    /* Determine the Next Protocol field for NSH header. */
    switch (ntohl(packet_type)) {
    case PT_ETH:
        np = NSH_P_ETHERNET;
        break;
    case PT_IPV4:
        np = NSH_P_IPV4;
        break;
    case PT_IPV6:
        np = NSH_P_IPV6;
        break;
    case PT_NSH:
        np = NSH_P_NSH;
        break;
    default:
        /* Error handling: drop packet. */
        xlate_report_debug(ctx, OFT_ACTION,
                           "Dropping packet as encap(nsh) is not "
                           "supported for packet type (%d,0x%x)",
                           pt_ns(packet_type), pt_ns_type(packet_type));
        ctx->error = 1;
        return buf;
    }
    /* Note that we have matched on packet_type! */
    wc->masks.packet_type = OVS_BE32_MAX;

    /* Reset all current flow packet headers. */
    memset(&flow->dl_dst, 0,
           sizeof(struct flow) - offsetof(struct flow, dl_dst));

    /* Populate the flow with the new NSH header. */
    flow->packet_type = htonl(PT_NSH);
    flow->dl_type = htons(ETH_TYPE_NSH);
    flow->nsh.flags = 0;
    flow->nsh.np = np;
    flow->nsh.spi = 0;
    flow->nsh.si = 255;

    if (md_type == NSH_M_TYPE1) {
        flow->nsh.mdtype = NSH_M_TYPE1;
        memset(flow->nsh.c, 0, sizeof flow->nsh.c);
        if (buf) {
            /* Drop any MD2 context TLVs. */
            ofpbuf_delete(buf);
            buf = NULL;
        }
    } else if (md_type == NSH_M_TYPE2) {
        flow->nsh.mdtype = NSH_M_TYPE2;
    }

    return buf;
}

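/* Worked example (added): an MD2 TLV with tlv_len = 6 has its data padded
 * from 6 to ROUND_UP(6, 4) = 8 bytes, i.e. 2 zero bytes appended after the
 * struct nsh_md2_tlv header and data, keeping each TLV 4-byte aligned as
 * the NSH MD2 format requires. */
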
static void
xlate_generic_encap_action(struct xlate_ctx *ctx,
                           const struct ofpact_encap *encap)
{
    struct flow *flow = &ctx->xin->flow;
    struct flow_wildcards *wc = ctx->wc;
    struct ofpbuf *encap_data = NULL;

    /* Ensure that any pending actions on the inner packet are applied before
     * rewriting the flow. */
    xlate_commit_actions(ctx);

    /* Rewrite the flow to reflect the effect of pushing the new encap
     * header. */
    switch (ntohl(encap->new_pkt_type)) {
    case PT_ETH:
        rewrite_flow_encap_ethernet(ctx, flow, wc);
        break;
    case PT_NSH:
        encap_data = rewrite_flow_encap_nsh(ctx, encap, flow, wc);
        break;
    default:
        /* New packet type was checked during decoding. */
        OVS_NOT_REACHED();
        break;
    }

    if (!ctx->error) {
        /* The actual encap datapath action will be generated at the next
         * commit. */
        ctx->pending_encap = true;
        ctx->encap_data = encap_data;
    }
}

/* Returns true if packet must be recirculated after decapsulation. */
static bool
xlate_generic_decap_action(struct xlate_ctx *ctx,
                           const struct ofpact_decap *decap OVS_UNUSED)
{
    struct flow *flow = &ctx->xin->flow;

    /* Ensure that any pending actions on the current packet are applied
     * before generating the decap action. */
    xlate_commit_actions(ctx);

    /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
    switch (ntohl(flow->packet_type)) {
    case PT_ETH:
        if (flow->vlans[0].tci & htons(VLAN_CFI)) {
            /* Error handling: drop packet. */
            xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
                               "decap Ethernet if VLAN is present.");
            ctx->error = 1;
        } else {
            /* Just change the packet_type.
             * Delay generating pop_eth to the next commit. */
            flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
                                                  ntohs(flow->dl_type)));
            ctx->wc->masks.dl_type = OVS_BE16_MAX;
        }
        return false;
    case PT_NSH:
        /* The decap_nsh action is generated at the commit executed as
         * part of freezing the ctx for recirculation. Here we just set
         * the new packet type based on the NSH next protocol field. */
        switch (flow->nsh.np) {
        case NSH_P_ETHERNET:
            flow->packet_type = htonl(PT_ETH);
            break;
        case NSH_P_IPV4:
            flow->packet_type = htonl(PT_IPV4);
            break;
        case NSH_P_IPV6:
            flow->packet_type = htonl(PT_IPV6);
            break;
        case NSH_P_NSH:
            flow->packet_type = htonl(PT_NSH);
            break;
        default:
            /* Error handling: drop packet. */
            xlate_report_debug(ctx, OFT_ACTION,
                               "Dropping packet as NSH next protocol %d "
                               "is not supported", flow->nsh.np);
            ctx->error = 1;
            return false;
        }
        ctx->wc->masks.nsh.np = UINT8_MAX;
        /* Trigger recirculation. */
        return true;
    default:
        /* Error handling: drop packet. */
        xlate_report_debug(
            ctx, OFT_ACTION,
            "Dropping packet as the decap() does not support "
            "packet type (%d,0x%x)",
            pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
        ctx->error = 1;
        return false;
    }
}
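
/* Added note: for the PT_ETH case above, a packet carrying dl_type 0x0800
 * becomes packet_type PACKET_TYPE(OFPHTN_ETHERTYPE, 0x0800), which equals
 * PT_IPV4, so subsequent actions see a bare IPv4 packet; the pop_eth
 * datapath action itself is only emitted at the next commit. */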

static void
recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
{
    /* No need to recirculate if already exiting. */
    if (ctx->exit) {
        return;
    }

    /* Do not consider recirculating unless the packet was previously MPLS. */
    if (!ctx->was_mpls) {
        return;
    }

    /* Special case these actions, only recirculating if necessary.
     * This avoids the overhead of recirculation in common use-cases.
     */
    switch (a->type) {

    /* Output actions do not require recirculation. */
    case OFPACT_OUTPUT:
    case OFPACT_OUTPUT_TRUNC:
    case OFPACT_ENQUEUE:
    case OFPACT_OUTPUT_REG:
    /* Set actions that don't touch L3+ fields do not require recirculation. */
    case OFPACT_SET_VLAN_VID:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_QUEUE:
    /* If actions of a group require recirculation, that can be detected
     * when translating them. */
    case OFPACT_GROUP:
        return;

    /* Set-field actions that don't touch L3+ fields don't require
     * recirculation. */
    case OFPACT_SET_FIELD:
        if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
            break;
        }
        return;

    /* For simplicity, recirculate in all other cases. */
    case OFPACT_CONTROLLER:
    case OFPACT_BUNDLE:
    case OFPACT_STRIP_VLAN:
    case OFPACT_PUSH_VLAN:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_REG_MOVE:
    case OFPACT_STACK_PUSH:
    case OFPACT_STACK_POP:
    case OFPACT_DEC_TTL:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_PUSH_MPLS:
    case OFPACT_POP_MPLS:
    case OFPACT_POP_QUEUE:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_RESUBMIT:
    case OFPACT_LEARN:
    case OFPACT_CONJUNCTION:
    case OFPACT_MULTIPATH:
    case OFPACT_NOTE:
    case OFPACT_EXIT:
    case OFPACT_SAMPLE:
    case OFPACT_CLONE:
    case OFPACT_ENCAP:
    case OFPACT_DECAP:
    case OFPACT_UNROLL_XLATE:
    case OFPACT_CT:
    case OFPACT_CT_CLEAR:
    case OFPACT_NAT:
    case OFPACT_DEBUG_RECIRC:
    case OFPACT_METER:
    case OFPACT_CLEAR_ACTIONS:
    case OFPACT_WRITE_ACTIONS:
    case OFPACT_WRITE_METADATA:
    case OFPACT_GOTO_TABLE:
    default:
        break;
    }

    /* Recirculate. */
    ctx_trigger_freeze(ctx);
}

static void
xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
{
    mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
    xlate_report_subfield(ctx, &a->dst);
}

static void
xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
{
    if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
        xlate_report_subfield(ctx, &a->subfield);
    } else {
        xlate_report_error(ctx, "stack underflow");
    }
}

/* Restore translation context data that was stored earlier. */
static void
xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
                          const struct ofpact_unroll_xlate *a)
{
    ctx->table_id = a->rule_table_id;
    ctx->rule_cookie = a->rule_cookie;
    xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
                 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
}

9583bc14
EJ
6147static void
6148do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6149 struct xlate_ctx *ctx)
6150{
49a73e0c 6151 struct flow_wildcards *wc = ctx->wc;
33bf9176 6152 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
6153 const struct ofpact *a;
6154
a36de779 6155 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
53902038 6156 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
a36de779 6157 }
f47ea021
JR
6158 /* dl_type already in the mask, not set below. */
6159
2d9b49dd
BP
6160 if (!ofpacts_len) {
6161 xlate_report(ctx, OFT_ACTION, "drop");
6162 return;
6163 }
6164
9583bc14
EJ
6165 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6166 struct ofpact_controller *controller;
6167 const struct ofpact_metadata *metadata;
b2dd70be
JR
6168 const struct ofpact_set_field *set_field;
6169 const struct mf_field *mf;
9583bc14 6170
fff1b9c0
JR
6171 if (ctx->error) {
6172 break;
6173 }
6174
e12ec36b
SH
6175 recirc_for_mpls(a, ctx);
6176
e672ff9b
JR
6177 if (ctx->exit) {
6178 /* Check if need to store the remaining actions for later
6179 * execution. */
1d361a81
BP
6180 if (ctx->freezing) {
6181 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
e672ff9b
JR
6182 ctx);
6183 }
6184 break;
7bbdd84f
SH
6185 }
6186
2d9b49dd
BP
6187 if (OVS_UNLIKELY(ctx->xin->trace)) {
6188 struct ds s = DS_EMPTY_INITIALIZER;
50f96b10 6189 ofpacts_format(a, OFPACT_ALIGN(a->len), NULL, &s);
2d9b49dd
BP
6190 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6191 ds_destroy(&s);
6192 }
6193
9583bc14
EJ
6194 switch (a->type) {
6195 case OFPACT_OUTPUT:
6196 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
6197 ofpact_get_OUTPUT(a)->max_len, true);
6198 break;
6199
7395c052 6200 case OFPACT_GROUP:
f4fb341b 6201 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
1d741d6d 6202 /* Group could not be found. */
db88b35c
JR
6203
6204 /* XXX: Terminates action list translation, but does not
6205 * terminate the pipeline. */
f4fb341b
SH
6206 return;
6207 }
7395c052
NZ
6208 break;
6209
9583bc14
EJ
6210 case OFPACT_CONTROLLER:
6211 controller = ofpact_get_CONTROLLER(a);
77ab5fd2
BP
6212 if (controller->pause) {
6213 ctx->pause = controller;
6214 ctx->xout->slow |= SLOW_CONTROLLER;
0d3239e8 6215 *ctx->paused_flow = ctx->xin->flow;
77ab5fd2
BP
6216 ctx_trigger_freeze(ctx);
6217 a = ofpact_next(a);
6218 } else {
6219 execute_controller_action(ctx, controller->max_len,
6220 controller->reason,
6221 controller->controller_id,
6222 controller->userdata,
6223 controller->userdata_len);
6224 }
9583bc14
EJ
6225 break;
6226
6227 case OFPACT_ENQUEUE:
16194afd
DDP
6228 memset(&wc->masks.skb_priority, 0xff,
6229 sizeof wc->masks.skb_priority);
9583bc14
EJ
6230 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
6231 break;
6232
6233 case OFPACT_SET_VLAN_VID:
f0fb825a
EG
6234 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6235 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6236 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
f0fb825a
EG
6237 if (!flow->vlans[0].tpid) {
6238 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6239 }
6240 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6241 flow->vlans[0].tci |=
6242 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6243 htons(VLAN_CFI));
ca287d20 6244 }
9583bc14
EJ
6245 break;
6246
6247 case OFPACT_SET_VLAN_PCP:
f0fb825a
EG
6248 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6249 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6250 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
f0fb825a
EG
6251 if (!flow->vlans[0].tpid) {
6252 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6253 }
6254 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6255 flow->vlans[0].tci |=
6256 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6257 << VLAN_PCP_SHIFT) | VLAN_CFI);
ca287d20 6258 }
9583bc14
EJ
6259 break;
6260
6261 case OFPACT_STRIP_VLAN:
f0fb825a 6262 flow_pop_vlan(flow, wc);
9583bc14
EJ
6263 break;
6264
6265 case OFPACT_PUSH_VLAN:
f0fb825a
EG
6266 flow_push_vlan_uninit(flow, wc);
6267 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6268 flow->vlans[0].tci = htons(VLAN_CFI);
9583bc14
EJ
6269 break;
6270
6271 case OFPACT_SET_ETH_SRC:
74ff3298
JR
6272 WC_MASK_FIELD(wc, dl_src);
6273 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
9583bc14
EJ
6274 break;
6275
6276 case OFPACT_SET_ETH_DST:
74ff3298
JR
6277 WC_MASK_FIELD(wc, dl_dst);
6278 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
9583bc14
EJ
6279 break;
6280
6281 case OFPACT_SET_IPV4_SRC:
33bf9176 6282 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6283 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 6284 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
6285 }
6286 break;
6287
6288 case OFPACT_SET_IPV4_DST:
33bf9176 6289 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6290 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 6291 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
6292 }
6293 break;
6294
04f01c24
BP
6295 case OFPACT_SET_IP_DSCP:
6296 if (is_ip_any(flow)) {
f47ea021 6297 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 6298 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 6299 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
9583bc14
EJ
6300 }
6301 break;
6302
ff14eb7a
JR
6303 case OFPACT_SET_IP_ECN:
6304 if (is_ip_any(flow)) {
6305 wc->masks.nw_tos |= IP_ECN_MASK;
6306 flow->nw_tos &= ~IP_ECN_MASK;
6307 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6308 }
6309 break;
6310
0c20dbe4
JR
6311 case OFPACT_SET_IP_TTL:
6312 if (is_ip_any(flow)) {
6313 wc->masks.nw_ttl = 0xff;
6314 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6315 }
6316 break;
6317
9583bc14 6318 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 6319 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6320 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6321 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 6322 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
6323 }
6324 break;
6325
6326 case OFPACT_SET_L4_DST_PORT:
b8778a0d 6327 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6328 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6329 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 6330 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
6331 }
6332 break;
6333
6334 case OFPACT_RESUBMIT:
8bf009bf
JR
6335 /* Freezing complicates resubmit. Some action in the flow
6336 * entry found by resubmit might trigger freezing. If that
6337              * happens, then we do not want to execute the resubmit again
6338 * during thawing, so we want to skip back to the head of the loop
6339 * to avoid that, only adding any actions that follow the resubmit
6340 * to the frozen actions.
6b1c5734 6341 */
9583bc14 6342 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
6b1c5734 6343 continue;
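            /* Editorial sketch (not part of the original file): given the
             * action list [resubmit(,1), output:2], if an action reached
             * through the resubmit freezes the translation, only "output:2"
             * should be saved for the thaw; saving the resubmit itself would
             * re-execute table 1 on thawing.  The "continue" above skips the
             * loop trailer, so the loop-head freeze check stores only the
             * actions that follow the resubmit. */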
9583bc14
EJ
6344
6345 case OFPACT_SET_TUNNEL:
33bf9176 6346 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
6347 break;
6348
6349 case OFPACT_SET_QUEUE:
16194afd
DDP
6350 memset(&wc->masks.skb_priority, 0xff,
6351 sizeof wc->masks.skb_priority);
9583bc14
EJ
6352 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6353 break;
6354
6355 case OFPACT_POP_QUEUE:
16194afd
DDP
6356 memset(&wc->masks.skb_priority, 0xff,
6357 sizeof wc->masks.skb_priority);
2d9b49dd
BP
6358 if (flow->skb_priority != ctx->orig_skb_priority) {
6359 flow->skb_priority = ctx->orig_skb_priority;
6360 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6361 flow->skb_priority);
6362 }
9583bc14
EJ
6363 break;
6364
6365 case OFPACT_REG_MOVE:
2d9b49dd 6366 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
9583bc14
EJ
6367 break;
6368
b2dd70be
JR
6369 case OFPACT_SET_FIELD:
6370 set_field = ofpact_get_SET_FIELD(a);
6371 mf = set_field->field;
b2dd70be 6372
aff49b8c
JR
6373 /* Set the field only if the packet actually has it. */
6374 if (mf_are_prereqs_ok(mf, flow, wc)) {
128684a6
JR
6375 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6376 mf_set_flow_value_masked(mf, set_field->value,
6377 ofpact_set_field_mask(set_field),
6378 flow);
2d9b49dd
BP
6379 } else {
6380 xlate_report(ctx, OFT_WARN,
6381 "unmet prerequisites for %s, set_field ignored",
6382 mf->name);
6383
b8778a0d 6384 }
b2dd70be
JR
6385 break;
6386
9583bc14 6387 case OFPACT_STACK_PUSH:
33bf9176
BP
6388 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6389 &ctx->stack);
9583bc14
EJ
6390 break;
6391
6392 case OFPACT_STACK_POP:
2d9b49dd 6393 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
9583bc14
EJ
6394 break;
6395
6396 case OFPACT_PUSH_MPLS:
8bfd0fda 6397 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
9583bc14
EJ
6398 break;
6399
6400 case OFPACT_POP_MPLS:
8bfd0fda 6401 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
9583bc14
EJ
6402 break;
6403
097d4939 6404 case OFPACT_SET_MPLS_LABEL:
8bfd0fda
BP
6405 compose_set_mpls_label_action(
6406 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
1d741d6d 6407 break;
097d4939
JR
6408
6409 case OFPACT_SET_MPLS_TC:
8bfd0fda 6410 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
097d4939
JR
6411 break;
6412
9583bc14 6413 case OFPACT_SET_MPLS_TTL:
8bfd0fda 6414 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
9583bc14
EJ
6415 break;
6416
6417 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 6418 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 6419 return;
9583bc14
EJ
6420 }
6421 break;
6422
6423 case OFPACT_DEC_TTL:
f74e7df7 6424 wc->masks.nw_ttl = 0xff;
9583bc14 6425 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 6426 return;
9583bc14
EJ
6427 }
6428 break;
6429
6430 case OFPACT_NOTE:
6431 /* Nothing to do. */
6432 break;
6433
6434 case OFPACT_MULTIPATH:
33bf9176 6435 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
2d9b49dd 6436 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
9583bc14
EJ
6437 break;
6438
6439 case OFPACT_BUNDLE:
9583bc14
EJ
6440 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
6441 break;
6442
6443 case OFPACT_OUTPUT_REG:
6444 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
6445 break;
6446
aaca4fe0
WT
6447 case OFPACT_OUTPUT_TRUNC:
6448 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
6449 ofpact_get_OUTPUT_TRUNC(a)->max_len);
6450 break;
6451
9583bc14
EJ
6452 case OFPACT_LEARN:
6453 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6454 break;
6455
2d9b49dd 6456 case OFPACT_CONJUNCTION:
afc3987b
BP
6457 /* A flow with a "conjunction" action represents part of a special
6458 * kind of "set membership match". Such a flow should not actually
6459              * get executed, but it could be, via, say, a "packet-out", even though
6460 * that wouldn't be useful. Log it to help debugging. */
2d9b49dd 6461 xlate_report_error(ctx, "executing no-op conjunction action");
18080541
BP
6462 break;
6463
9583bc14
EJ
6464 case OFPACT_EXIT:
6465 ctx->exit = true;
6466 break;
6467
2d9b49dd
BP
6468 case OFPACT_UNROLL_XLATE:
6469 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
e672ff9b 6470 break;
2d9b49dd 6471
9583bc14 6472 case OFPACT_FIN_TIMEOUT:
33bf9176 6473 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
9583bc14
EJ
6474 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6475 break;
6476
6477 case OFPACT_CLEAR_ACTIONS:
2d9b49dd 6478 xlate_report_action_set(ctx, "was");
7fdb60a7 6479 ofpbuf_clear(&ctx->action_set);
c61f3870
BP
6480 ctx->xin->flow.actset_output = OFPP_UNSET;
6481 ctx->action_set_has_group = false;
7fdb60a7
SH
6482 break;
6483
6484 case OFPACT_WRITE_ACTIONS:
7e7e8dbb 6485 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
2d9b49dd 6486 xlate_report_action_set(ctx, "is");
9583bc14
EJ
6487 break;
6488
6489 case OFPACT_WRITE_METADATA:
6490 metadata = ofpact_get_WRITE_METADATA(a);
33bf9176
BP
6491 flow->metadata &= ~metadata->mask;
6492 flow->metadata |= metadata->metadata & metadata->mask;
9583bc14
EJ
6493 break;
6494
638a19b0 6495 case OFPACT_METER:
076caa2f 6496 xlate_meter_action(ctx, ofpact_get_METER(a));
638a19b0
JR
6497 break;
6498
9583bc14 6499 case OFPACT_GOTO_TABLE: {
9583bc14 6500 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14 6501
9167fc1a
JR
6502 ovs_assert(ctx->table_id < ogt->table_id);
6503
4468099e 6504 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
2cd20955 6505 ogt->table_id, true, true, false);
9583bc14
EJ
6506 break;
6507 }
6508
6509 case OFPACT_SAMPLE:
6510 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6511 break;
d4abaff5 6512
7ae62a67 6513 case OFPACT_CLONE:
eee69393 6514 compose_clone(ctx, ofpact_get_CLONE(a));
7ae62a67
WT
6515 break;
6516
f839892a
JS
6517 case OFPACT_ENCAP:
6518 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6519 break;
6520
6521 case OFPACT_DECAP: {
6522 bool recirc_needed =
6523 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6524 if (!ctx->error && recirc_needed) {
6525 /* Recirculate for parsing of inner packet. */
6526 ctx_trigger_freeze(ctx);
6527 /* Then continue with next action. */
6528 a = ofpact_next(a);
6529 }
6530 break;
6531 }
6532
07659514 6533 case OFPACT_CT:
07659514
JS
6534 compose_conntrack_action(ctx, ofpact_get_CT(a));
6535 break;
6536
72fe7578
BP
6537 case OFPACT_CT_CLEAR:
6538 clear_conntrack(ctx);
6539 break;
6540
9ac0aada
JR
6541 case OFPACT_NAT:
6542 /* This will be processed by compose_conntrack_action(). */
6543 ctx->ct_nat_action = ofpact_get_NAT(a);
6544 break;
6545
d4abaff5 6546 case OFPACT_DEBUG_RECIRC:
1d361a81 6547 ctx_trigger_freeze(ctx);
d4abaff5
BP
6548 a = ofpact_next(a);
6549 break;
9583bc14 6550 }
1d741d6d
JR
6551
6552         /* Check if we need to store this and the remaining actions for later
6553 * execution. */
1d361a81
BP
6554 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
6555 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
1d741d6d
JR
6556 break;
6557 }
9583bc14 6558 }
9583bc14
EJ
6559}
6560
6561void
6562xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
1f4a8933
JR
6563 ovs_version_t version, const struct flow *flow,
6564 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
1520ef4f
BP
6565 const struct dp_packet *packet, struct flow_wildcards *wc,
6566 struct ofpbuf *odp_actions)
9583bc14
EJ
6567{
6568 xin->ofproto = ofproto;
1f4a8933 6569 xin->tables_version = version;
9583bc14 6570 xin->flow = *flow;
8d8ab6c2 6571 xin->upcall_flow = flow;
cc377352 6572 xin->flow.in_port.ofp_port = in_port;
c61f3870 6573 xin->flow.actset_output = OFPP_UNSET;
9583bc14 6574 xin->packet = packet;
df70a773 6575 xin->allow_side_effects = packet != NULL;
9583bc14 6576 xin->rule = rule;
b256dc52 6577 xin->xcache = NULL;
9583bc14
EJ
6578 xin->ofpacts = NULL;
6579 xin->ofpacts_len = 0;
6580 xin->tcp_flags = tcp_flags;
2d9b49dd 6581 xin->trace = NULL;
9583bc14 6582 xin->resubmit_stats = NULL;
790c5d26 6583 xin->depth = 0;
cdd42eda 6584 xin->resubmits = 0;
49a73e0c 6585 xin->wc = wc;
1520ef4f 6586 xin->odp_actions = odp_actions;
331c07ac 6587 xin->in_packet_out = false;
e6bc8e74 6588 xin->recirc_queue = NULL;
e672ff9b
JR
6589
6590 /* Do recirc lookup. */
1d361a81 6591 xin->frozen_state = NULL;
29b1ea3f
BP
6592 if (flow->recirc_id) {
6593 const struct recirc_id_node *node
6594 = recirc_id_node_find(flow->recirc_id);
6595 if (node) {
1d361a81 6596 xin->frozen_state = &node->state;
29b1ea3f
BP
6597 }
6598 }
9583bc14
EJ
6599}
6600
6601void
6602xlate_out_uninit(struct xlate_out *xout)
6603{
e672ff9b 6604 if (xout) {
fbf5d6ec 6605 recirc_refs_unref(&xout->recircs);
9583bc14
EJ
6606 }
6607}
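/* Editorial sketch (not part of the original file): a minimal caller of the
 * translation API above, assuming the caller already holds an 'ofproto', a
 * 'flow', and an 'in_port'.  Wildcard tracking and error handling are
 * elided; the names below are illustrative only. */
static void
example_translate(struct ofproto_dpif *ofproto, const struct flow *flow,
                  ofp_port_t in_port)
{
    uint64_t stub[256 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
    struct xlate_in xin;
    struct xlate_out xout;

    xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
                  flow, in_port, NULL, 0, NULL, NULL, &odp_actions);
    if (xlate_actions(&xin, &xout) == XLATE_OK) {
        /* 'odp_actions' now holds datapath actions for 'flow'; a nonzero
         * 'xout.slow' means the flow needs the slow path. */
    }
    xlate_out_uninit(&xout);
    ofpbuf_uninit(&odp_actions);
}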
9583bc14 6608\f
55954f6e
EJ
6609static struct skb_priority_to_dscp *
6610get_skb_priority(const struct xport *xport, uint32_t skb_priority)
6611{
6612 struct skb_priority_to_dscp *pdscp;
6613 uint32_t hash;
6614
6615 hash = hash_int(skb_priority, 0);
6616 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
6617 if (pdscp->skb_priority == skb_priority) {
6618 return pdscp;
6619 }
6620 }
6621 return NULL;
6622}
6623
6624static bool
6625dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
6626 uint8_t *dscp)
6627{
6628 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
6629 *dscp = pdscp ? pdscp->dscp : 0;
6630 return pdscp != NULL;
6631}
6632
16194afd
DDP
6633static size_t
6634count_skb_priorities(const struct xport *xport)
6635{
6636 return hmap_count(&xport->skb_priorities);
6637}
6638
55954f6e
EJ
6639static void
6640clear_skb_priorities(struct xport *xport)
6641{
4ec3d7c7 6642 struct skb_priority_to_dscp *pdscp;
55954f6e 6643
4ec3d7c7 6644 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
55954f6e
EJ
6645 free(pdscp);
6646 }
6647}
6648
ce4a6b76
BP
6649static bool
6650actions_output_to_local_port(const struct xlate_ctx *ctx)
6651{
46c88433 6652 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
ce4a6b76
BP
6653 const struct nlattr *a;
6654 unsigned int left;
6655
1520ef4f
BP
6656 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
6657 ctx->odp_actions->size) {
ce4a6b76
BP
6658 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
6659 && nl_attr_get_odp_port(a) == local_odp_port) {
6660 return true;
6661 }
6662 }
6663 return false;
6664}
9583bc14 6665
5e2a6702 6666#if defined(__linux__)
7d031d7e
BP
6667/* Returns the maximum number of packets that the Linux kernel is willing to
6668 * queue up internally to certain kinds of software-implemented ports, or the
6669 * default (and rarely modified) value if it cannot be determined. */
6670static int
6671netdev_max_backlog(void)
6672{
6673 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6674 static int max_backlog = 1000; /* The normal default value. */
6675
6676 if (ovsthread_once_start(&once)) {
6677 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
6678 FILE *stream;
6679 int n;
6680
6681 stream = fopen(filename, "r");
6682 if (!stream) {
120c348f 6683 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7d031d7e
BP
6684 } else {
6685 if (fscanf(stream, "%d", &n) != 1) {
6686 VLOG_WARN("%s: read error", filename);
6687 } else if (n <= 100) {
6688 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
6689 } else {
6690 max_backlog = n;
6691 }
6692 fclose(stream);
6693 }
6694 ovsthread_once_done(&once);
6695
6696 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
6697 }
6698
6699 return max_backlog;
6700}
6701
6702/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
6703 * 'odp_actions'. */
6704static int
6705count_output_actions(const struct ofpbuf *odp_actions)
6706{
6707 const struct nlattr *a;
6708 size_t left;
6709 int n = 0;
6710
6fd6ed71 6711 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7d031d7e
BP
6712 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
6713 n++;
6714 }
6715 }
6716 return n;
6717}
5e2a6702 6718#endif /* defined(__linux__) */
7d031d7e
BP
6719
6720/* Returns true if 'odp_actions' contains more output actions than the datapath
6721 * can reliably handle in one go. On Linux, this is the value of the
6722 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
6723 * packets that the kernel is willing to queue up for processing while the
6724 * datapath is processing a set of actions. */
6725static bool
5e2a6702 6726too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7d031d7e
BP
6727{
6728#ifdef __linux__
6fd6ed71 6729 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7d031d7e
BP
6730 && count_output_actions(odp_actions) > netdev_max_backlog());
6731#else
6732 /* OSes other than Linux might have similar limits, but we don't know how
6733      * to determine them. */
6734 return false;
6735#endif
6736}
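/* Editorial note (not part of the original file): the size test above is a
 * cheap conservative bound.  Each OVS_ACTION_ATTR_OUTPUT is a u32 Netlink
 * attribute of exactly NL_A_U32_SIZE bytes, so an action list of S bytes
 * cannot contain more than S / NL_A_U32_SIZE outputs; the linear
 * count_output_actions() scan therefore runs only for action lists that
 * could plausibly exceed the backlog limit. */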
6737
234c3da9
BP
6738static void
6739xlate_wc_init(struct xlate_ctx *ctx)
6740{
6741 flow_wildcards_init_catchall(ctx->wc);
6742
6743 /* Some fields we consider to always be examined. */
3d4b2e6e 6744 WC_MASK_FIELD(ctx->wc, packet_type);
5e2e998a 6745 WC_MASK_FIELD(ctx->wc, in_port);
3d4b2e6e
JS
6746 if (is_ethernet(&ctx->xin->flow, NULL)) {
6747 WC_MASK_FIELD(ctx->wc, dl_type);
6748 }
234c3da9 6749 if (is_ip_any(&ctx->xin->flow)) {
5e2e998a 6750 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
234c3da9
BP
6751 }
6752
6753 if (ctx->xbridge->support.odp.recirc) {
6754 /* Always exactly match recirc_id when datapath supports
6755 * recirculation. */
5e2e998a 6756 WC_MASK_FIELD(ctx->wc, recirc_id);
234c3da9
BP
6757 }
6758
6759 if (ctx->xbridge->netflow) {
6760 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
6761 }
6762
6763 tnl_wc_init(&ctx->xin->flow, ctx->wc);
6764}
6765
6766static void
6767xlate_wc_finish(struct xlate_ctx *ctx)
6768{
f0fb825a
EG
6769 int i;
6770
234c3da9
BP
6771 /* Clear the metadata and register wildcard masks, because we won't
6772 * use non-header fields as part of the cache. */
6773 flow_wildcards_clear_non_packet_fields(ctx->wc);
6774
f839892a 6775 /* Wildcard ethernet fields if the original packet type was not
beb75a40
JS
6776 * Ethernet. */
6777 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
6778 ctx->wc->masks.dl_dst = eth_addr_zero;
6779 ctx->wc->masks.dl_src = eth_addr_zero;
3d4b2e6e 6780 ctx->wc->masks.dl_type = 0;
beb75a40
JS
6781 }
6782
234c3da9
BP
6783 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
6784 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
6785 * represent these fields. The datapath interface, on the other hand,
6786 * represents them with just 8 bits each. This means that if the high
6787 * 8 bits of the masks for these fields somehow become set, then they
6788 * will get chopped off by a round trip through the datapath, and
6789 * revalidation will spot that as an inconsistency and delete the flow.
6790 * Avoid the problem here by making sure that only the low 8 bits of
6791 * either field can be unwildcarded for ICMP.
6792 */
a75636c8 6793 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
234c3da9
BP
6794 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
6795 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
6796 }
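    /* Editorial example (not part of the original file): without the masking
     * above, a 16-bit mask such as htons(0xffff) on tp_src of an ICMP flow
     * would be installed in the datapath as the 8-bit value 0xff; on the
     * next flow dump the two masks would no longer agree, and revalidation
     * would delete the flow as inconsistent. */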
6797 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
f0fb825a
EG
6798 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
6799 if (ctx->wc->masks.vlans[i].tci) {
6800 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
6801 }
234c3da9 6802 }
4a7ab326
DDP
6803
6804 /* The classifier might return masks that match on tp_src and tp_dst even
6805 * for later fragments. This happens because there might be flows that
6806 * match on tp_src or tp_dst without matching on the frag bits, because
6807 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
6808 * datapath flows and since tp_src and tp_dst are always going to be 0,
6809 * wildcard the fields here. */
6810 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
6811 ctx->wc->masks.tp_src = 0;
6812 ctx->wc->masks.tp_dst = 0;
6813 }
234c3da9
BP
6814}
6815
e672ff9b
JR
6816/* Translates the flow, actions, or rule in 'xin' into datapath actions in
6817 * 'xout'.
56450a41 6818 * The caller must take responsibility for eventually freeing 'xout', with
fff1b9c0
JR
6819 * xlate_out_uninit().
6820  * Returns 'XLATE_OK' if translation was successful.  In case of an error, an
6821 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
6822 * so that most callers may ignore the return value and transparently install a
6823 * drop flow when the translation fails. */
6824enum xlate_error
84f0f298 6825xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
9583bc14 6826{
e467ea42
BP
6827 *xout = (struct xlate_out) {
6828 .slow = 0,
fbf5d6ec 6829 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
e467ea42
BP
6830 };
6831
84f0f298 6832 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
bb00fdef
BP
6833 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
6834 if (!xbridge) {
fff1b9c0 6835 return XLATE_BRIDGE_NOT_FOUND;
bb00fdef
BP
6836 }
6837
33bf9176
BP
6838 struct flow *flow = &xin->flow;
6839
84cf3c1f 6840 uint8_t stack_stub[1024];
bb00fdef 6841 uint64_t action_set_stub[1024 / 8];
1d361a81 6842 uint64_t frozen_actions_stub[1024 / 8];
1520ef4f
BP
6843 uint64_t actions_stub[256 / 8];
6844 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
0d3239e8 6845 struct flow paused_flow;
bb00fdef
BP
6846 struct xlate_ctx ctx = {
6847 .xin = xin,
6848 .xout = xout,
6849 .base_flow = *flow,
c2b878e0 6850 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
bb00fdef
BP
6851 .xbridge = xbridge,
6852 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
6853 .rule = xin->rule,
c0e638aa
BP
6854 .wc = (xin->wc
6855 ? xin->wc
f36efd90 6856 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
1520ef4f 6857 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
bb00fdef 6858
790c5d26 6859 .depth = xin->depth,
cdd42eda 6860 .resubmits = xin->resubmits,
bb00fdef
BP
6861 .in_group = false,
6862 .in_action_set = false,
331c07ac 6863 .in_packet_out = xin->in_packet_out,
f839892a 6864 .pending_encap = false,
1fc11c59 6865 .encap_data = NULL,
bb00fdef
BP
6866
6867 .table_id = 0,
6868 .rule_cookie = OVS_BE64_MAX,
6869 .orig_skb_priority = flow->skb_priority,
6870 .sflow_n_outputs = 0,
6871 .sflow_odp_port = 0,
2031ef97 6872 .nf_output_iface = NF_OUT_DROP,
bb00fdef 6873 .exit = false,
fff1b9c0 6874 .error = XLATE_OK,
3d6151f3 6875 .mirrors = 0,
bb00fdef 6876
1d361a81 6877 .freezing = false,
53cc166a 6878 .recirc_update_dp_hash = false,
1d361a81 6879 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
77ab5fd2 6880 .pause = NULL,
0d3239e8 6881 .paused_flow = &paused_flow,
bb00fdef 6882
e12ec36b 6883 .was_mpls = false,
07659514 6884 .conntracked = false,
bb00fdef 6885
9ac0aada
JR
6886 .ct_nat_action = NULL,
6887
bb00fdef
BP
6888 .action_set_has_group = false,
6889 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
6890 };
865ca6cf
BP
6891
6892 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
42deb67d
PS
6893 * the packet as the datapath will treat it for output actions. Our
6894 * datapath doesn't retain tunneling information without us re-setting
6895 * it, so clear the tunnel data.
865ca6cf 6896 */
42deb67d 6897
bb00fdef 6898 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
865ca6cf 6899
1520ef4f 6900 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
c0e638aa 6901 xlate_wc_init(&ctx);
bb00fdef 6902
46c88433 6903 COVERAGE_INC(xlate_actions);
9583bc14 6904
2d9b49dd
BP
6905 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
6906 xbridge->name);
1d361a81
BP
6907 if (xin->frozen_state) {
6908 const struct frozen_state *state = xin->frozen_state;
e672ff9b 6909
2d9b49dd
BP
6910 struct ovs_list *old_trace = xin->trace;
6911 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
d6bef3cc 6912
e672ff9b 6913 if (xin->ofpacts_len > 0 || ctx.rule) {
2d9b49dd
BP
6914 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
6915 xin->ofpacts_len ? "actions" : "rule");
fff1b9c0 6916 ctx.error = XLATE_RECIRCULATION_CONFLICT;
1520ef4f 6917 goto exit;
e672ff9b
JR
6918 }
6919
6920 /* Set the bridge for post-recirculation processing if needed. */
07a3cd5c 6921 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
2082425c 6922 const struct xbridge *new_bridge
290835f9 6923 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
e672ff9b
JR
6924
6925 if (OVS_UNLIKELY(!new_bridge)) {
6926 /* Drop the packet if the bridge cannot be found. */
2d9b49dd 6927 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
fff1b9c0 6928 ctx.error = XLATE_BRIDGE_NOT_FOUND;
2d9b49dd 6929 xin->trace = old_trace;
1520ef4f 6930 goto exit;
e672ff9b
JR
6931 }
6932 ctx.xbridge = new_bridge;
1f4a8933
JR
6933 /* The bridge is now known so obtain its table version. */
6934 ctx.xin->tables_version
6935 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
e672ff9b
JR
6936 }
6937
1d361a81
BP
6938 /* Set the thawed table id. Note: A table lookup is done only if there
6939 * are no frozen actions. */
2082425c 6940 ctx.table_id = state->table_id;
2d9b49dd
BP
6941 xlate_report(&ctx, OFT_THAW,
6942 "Resuming from table %"PRIu8, ctx.table_id);
e672ff9b 6943
40b0fbd3 6944 ctx.conntracked = state->conntracked;
07659514 6945 if (!state->conntracked) {
72fe7578 6946 clear_conntrack(&ctx);
07659514
JS
6947 }
6948
e672ff9b 6949 /* Restore pipeline metadata. May change flow's in_port and other
1d361a81
BP
6950 * metadata to the values that existed when freezing was triggered. */
6951 frozen_metadata_to_flow(&state->metadata, flow);
e672ff9b
JR
6952
6953 /* Restore stack, if any. */
2082425c 6954 if (state->stack) {
84cf3c1f 6955 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
e672ff9b
JR
6956 }
6957
29bae541
BP
6958 /* Restore mirror state. */
6959 ctx.mirrors = state->mirrors;
6960
e672ff9b 6961 /* Restore action set, if any. */
2082425c 6962 if (state->action_set_len) {
2d9b49dd 6963 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
417509fa 6964 state->action_set, state->action_set_len);
d6bef3cc 6965
7e7e8dbb
BP
6966 flow->actset_output = OFPP_UNSET;
6967 xlate_write_actions__(&ctx, state->action_set,
6968 state->action_set_len);
e672ff9b
JR
6969 }
6970
1d361a81
BP
6971 /* Restore frozen actions. If there are no actions, processing will
6972 * start with a lookup in the table set above. */
417509fa
BP
6973 xin->ofpacts = state->ofpacts;
6974 xin->ofpacts_len = state->ofpacts_len;
6975 if (state->ofpacts_len) {
2d9b49dd 6976 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
d6bef3cc 6977 xin->ofpacts, xin->ofpacts_len);
e672ff9b 6978 }
e672ff9b 6979
2d9b49dd
BP
6980 xin->trace = old_trace;
6981 } else if (OVS_UNLIKELY(flow->recirc_id)) {
6982 xlate_report_error(&ctx,
6983 "Recirculation context not found for ID %"PRIx32,
6984 flow->recirc_id);
fff1b9c0 6985 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
1520ef4f 6986 goto exit;
e672ff9b 6987 }
9583bc14 6988
8d8ab6c2
JG
6989 /* Tunnel metadata in udpif format must be normalized before translation. */
6990 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
5b09d9f7
MS
6991 const struct tun_table *tun_tab = ofproto_get_tun_tab(
6992 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
6993 int err;
6994
6995 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
6996 &xin->upcall_flow->tunnel,
6997 &flow->tunnel);
6998 if (err) {
2d9b49dd 6999 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
8d8ab6c2
JG
7000 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7001 goto exit;
7002 }
7003 } else if (!flow->tunnel.metadata.tab) {
7004 /* If the original flow did not come in on a tunnel, then it won't have
7005 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7006 * table in case we generate tunnel actions. */
5b09d9f7
MS
7007 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7008 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
7009 }
7010 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7011
beb75a40
JS
7012 /* Get the proximate input port of the packet. (If xin->frozen_state,
7013 * flow->in_port is the ultimate input port of the packet.) */
7014 struct xport *in_port = get_ofp_port(xbridge,
7015 ctx.base_flow.in_port.ofp_port);
7016
875ab130
BP
7017 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7018 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
beb75a40
JS
7019 /* Add dummy Ethernet header to non-L2 packet if it's coming from a
7020 * L3 port. So all packets will be L2 packets for lookup.
7021 * The dl_type has already been set from the packet_type. */
7022 flow->packet_type = htonl(PT_ETH);
7023 flow->dl_src = eth_addr_zero;
7024 flow->dl_dst = eth_addr_zero;
f839892a 7025 ctx.pending_encap = true;
beb75a40
JS
7026 }
7027
10c44245 7028 if (!xin->ofpacts && !ctx.rule) {
b2e89cc9 7029 ctx.rule = rule_dpif_lookup_from_table(
1f4a8933 7030 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
1e1e1d19 7031 ctx.xin->resubmit_stats, &ctx.table_id,
a027899e 7032 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
10c44245 7033 if (ctx.xin->resubmit_stats) {
b2e89cc9 7034 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
10c44245 7035 }
b256dc52
JS
7036 if (ctx.xin->xcache) {
7037 struct xc_entry *entry;
7038
7039 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
901a517e 7040 entry->rule = ctx.rule;
07a3cd5c 7041 ofproto_rule_ref(&ctx.rule->up);
b256dc52 7042 }
a8c31348 7043
2d9b49dd 7044 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
10c44245 7045 }
10c44245 7046
1d361a81
BP
7047 /* Tunnel stats only for not-thawed packets. */
7048 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
b256dc52
JS
7049 if (ctx.xin->resubmit_stats) {
7050 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7051 if (in_port->bfd) {
7052 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7053 }
7054 }
7055 if (ctx.xin->xcache) {
7056 struct xc_entry *entry;
7057
7058 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
901a517e
JR
7059 entry->dev.rx = netdev_ref(in_port->netdev);
7060 entry->dev.bfd = bfd_ref(in_port->bfd);
d6fc5f57
EJ
7061 }
7062 }
7063
1d361a81 7064 if (!xin->frozen_state && process_special(&ctx, in_port)) {
bef1403e
BP
7065 /* process_special() did all the processing for this packet.
7066 *
1d361a81
BP
7067 * We do not perform special processing on thawed packets, since that
7068 * was done before they were frozen and should not be redone. */
bef1403e
BP
7069 } else if (in_port && in_port->xbundle
7070 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
2d9b49dd
BP
7071 xlate_report_error(&ctx, "dropping packet received on port "
7072 "%s, which is reserved exclusively for mirroring",
7073 in_port->xbundle->name);
bef1403e 7074 } else {
1d361a81 7075 /* Sampling is done on initial reception; don't redo after thawing. */
a6092018 7076 unsigned int user_cookie_offset = 0;
1d361a81 7077 if (!xin->frozen_state) {
a6092018
BP
7078 user_cookie_offset = compose_sflow_action(&ctx);
7079 compose_ipfix_action(&ctx, ODPP_NONE);
e672ff9b 7080 }
0731abc5 7081 size_t sample_actions_len = ctx.odp_actions->size;
9583bc14 7082
234c3da9
BP
7083 if (tnl_process_ecn(flow)
7084 && (!in_port || may_receive(in_port, &ctx))) {
1806291d
BP
7085 const struct ofpact *ofpacts;
7086 size_t ofpacts_len;
7087
7088 if (xin->ofpacts) {
7089 ofpacts = xin->ofpacts;
7090 ofpacts_len = xin->ofpacts_len;
7091 } else if (ctx.rule) {
7092 const struct rule_actions *actions
07a3cd5c 7093 = rule_get_actions(&ctx.rule->up);
1806291d
BP
7094 ofpacts = actions->ofpacts;
7095 ofpacts_len = actions->ofpacts_len;
07a3cd5c 7096 ctx.rule_cookie = ctx.rule->up.flow_cookie;
1806291d
BP
7097 } else {
7098 OVS_NOT_REACHED();
7099 }
7100
7efbc3b7 7101 mirror_ingress_packet(&ctx);
9583bc14 7102 do_xlate_actions(ofpacts, ofpacts_len, &ctx);
fff1b9c0
JR
7103 if (ctx.error) {
7104 goto exit;
7105 }
9583bc14
EJ
7106
7107 /* We've let OFPP_NORMAL and the learning action look at the
1d361a81 7108 * packet, so cancel all actions and freezing if forwarding is
8a5fb3b4 7109 * disabled. */
9efd308e
DV
7110 if (in_port && (!xport_stp_forward_state(in_port) ||
7111 !xport_rstp_forward_state(in_port))) {
1520ef4f 7112 ctx.odp_actions->size = sample_actions_len;
1d361a81 7113 ctx_cancel_freeze(&ctx);
8a5fb3b4
BP
7114 ofpbuf_clear(&ctx.action_set);
7115 }
7116
1d361a81 7117 if (!ctx.freezing) {
8a5fb3b4 7118 xlate_action_set(&ctx);
e672ff9b 7119 }
1d361a81 7120 if (ctx.freezing) {
77ab5fd2 7121 finish_freezing(&ctx);
9583bc14
EJ
7122 }
7123 }
7124
e672ff9b 7125 /* Output only fully processed packets. */
1d361a81 7126 if (!ctx.freezing
e672ff9b 7127 && xbridge->has_in_band
ce4a6b76
BP
7128 && in_band_must_output_to_local_port(flow)
7129 && !actions_output_to_local_port(&ctx)) {
e93ef1c7 7130 compose_output_action(&ctx, OFPP_LOCAL, NULL);
9583bc14 7131 }
aaa0fbae 7132
a6092018
BP
7133 if (user_cookie_offset) {
7134 fix_sflow_action(&ctx, user_cookie_offset);
e672ff9b 7135 }
9583bc14
EJ
7136 }
7137
1520ef4f 7138 if (nl_attr_oversized(ctx.odp_actions->size)) {
542024c4 7139 /* These datapath actions are too big for a Netlink attribute, so we
0f032e95
BP
7140 * can't hand them to the kernel directly. dpif_execute() can execute
7141 * them one by one with help, so just mark the result as SLOW_ACTION to
7142 * prevent the flow from being installed. */
7143 COVERAGE_INC(xlate_actions_oversize);
7144 ctx.xout->slow |= SLOW_ACTION;
1520ef4f 7145 } else if (too_many_output_actions(ctx.odp_actions)) {
7d031d7e
BP
7146 COVERAGE_INC(xlate_actions_too_many_output);
7147 ctx.xout->slow |= SLOW_ACTION;
542024c4
BP
7148 }
7149
64fb5f82
JP
7150 /* Update NetFlow for non-frozen traffic. */
7151 if (xbridge->netflow && !xin->frozen_state) {
1806291d
BP
7152 if (ctx.xin->resubmit_stats) {
7153 netflow_flow_update(xbridge->netflow, flow,
2031ef97 7154 ctx.nf_output_iface,
1806291d
BP
7155 ctx.xin->resubmit_stats);
7156 }
7157 if (ctx.xin->xcache) {
7158 struct xc_entry *entry;
b256dc52 7159
1806291d 7160 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
901a517e
JR
7161 entry->nf.netflow = netflow_ref(xbridge->netflow);
7162 entry->nf.flow = xmemdup(flow, sizeof *flow);
7163 entry->nf.iface = ctx.nf_output_iface;
d6fc5f57
EJ
7164 }
7165 }
7166
8d8ab6c2
JG
7167 /* Translate tunnel metadata masks to udpif format if necessary. */
7168 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7169 if (ctx.wc->masks.tunnel.metadata.present.map) {
7170 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7171 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7172 sizeof(struct geneve_opt)];
7173
7174 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7175 &ctx.wc->masks.tunnel,
7176 upcall_tnl->metadata.opts.gnv,
7177 upcall_tnl->metadata.present.len,
7178 opts);
7179 memset(&ctx.wc->masks.tunnel.metadata, 0,
7180 sizeof ctx.wc->masks.tunnel.metadata);
7181 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7182 upcall_tnl->metadata.present.len);
7183 }
7184 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7185 ctx.wc->masks.tunnel.metadata.tab = NULL;
7186 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7187 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7188 /* If we didn't have options in UDPIF format and didn't have an existing
7189 * metadata table, then it means that there were no options at all when
7190 * we started processing and any wildcards we picked up were from
7191 * action generation. Without options on the incoming packet, wildcards
7192 * aren't meaningful. To avoid them possibly getting misinterpreted,
7193 * just clear everything. */
7194 if (ctx.wc->masks.tunnel.metadata.present.map) {
7195 memset(&ctx.wc->masks.tunnel.metadata, 0,
7196 sizeof ctx.wc->masks.tunnel.metadata);
7197 } else {
7198 ctx.wc->masks.tunnel.metadata.tab = NULL;
7199 }
7200 }
7201
c0e638aa 7202 xlate_wc_finish(&ctx);
1520ef4f
BP
7203
7204exit:
8d8ab6c2
JG
7205 /* Reset the table to what it was when we came in. If we only fetched
7206 * it locally, then it has no meaning outside of flow translation. */
7207 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7208
1520ef4f
BP
7209 ofpbuf_uninit(&ctx.stack);
7210 ofpbuf_uninit(&ctx.action_set);
1d361a81 7211 ofpbuf_uninit(&ctx.frozen_actions);
1520ef4f 7212 ofpbuf_uninit(&scratch_actions);
1fc11c59 7213 ofpbuf_delete(ctx.encap_data);
fff1b9c0
JR
7214
7215 /* Make sure we return a "drop flow" in case of an error. */
7216 if (ctx.error) {
7217 xout->slow = 0;
7218 if (xin->odp_actions) {
7219 ofpbuf_clear(xin->odp_actions);
7220 }
7221 }
7222 return ctx.error;
91d6cd12
AW
7223}
7224
77ab5fd2
BP
7225enum ofperr
7226xlate_resume(struct ofproto_dpif *ofproto,
7227 const struct ofputil_packet_in_private *pin,
7228 struct ofpbuf *odp_actions,
7229 enum slow_path_reason *slow)
7230{
7231 struct dp_packet packet;
4d617a87
BP
7232 dp_packet_use_const(&packet, pin->base.packet,
7233 pin->base.packet_len);
77ab5fd2
BP
7234
7235 struct flow flow;
7236 flow_extract(&packet, &flow);
7237
7238 struct xlate_in xin;
1f4a8933
JR
7239 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7240 &flow, 0, NULL, ntohs(flow.tcp_flags),
77ab5fd2
BP
7241 &packet, NULL, odp_actions);
7242
7243 struct ofpact_note noop;
7244 ofpact_init_NOTE(&noop);
7245 noop.length = 0;
7246
7247 bool any_actions = pin->actions_len > 0;
7248 struct frozen_state state = {
7249 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7250 .ofproto_uuid = pin->bridge,
7251 .stack = pin->stack,
84cf3c1f 7252 .stack_size = pin->stack_size,
77ab5fd2
BP
7253 .mirrors = pin->mirrors,
7254 .conntracked = pin->conntracked,
7255
7256 /* When there are no actions, xlate_actions() will search the flow
7257 * table. We don't want it to do that (we want it to resume), so
7258 * supply a no-op action if there aren't any.
7259 *
7260 * (We can't necessarily avoid translating actions entirely if there
7261 * aren't any actions, because there might be some finishing-up to do
7262 * at the end of the pipeline, and we don't check for those
7263 * conditions.) */
7264 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7265 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7266
7267 .action_set = pin->action_set,
7268 .action_set_len = pin->action_set_len,
7269 };
7270 frozen_metadata_from_flow(&state.metadata,
4d617a87 7271 &pin->base.flow_metadata.flow);
77ab5fd2
BP
7272 xin.frozen_state = &state;
7273
7274 struct xlate_out xout;
7275 enum xlate_error error = xlate_actions(&xin, &xout);
7276 *slow = xout.slow;
7277 xlate_out_uninit(&xout);
7278
7279     /* xlate_actions() can generate a number of errors, but only
7280      * XLATE_BRIDGE_NOT_FOUND stands out as one that we should be sure to
7281      * report over OpenFlow.  The others could come up in packet-outs or
7282      * regular flow translation, and reporting them to the controller is
7283      * unlikely to be useful. */
7284 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7285}
7286
2eb79142
JG
7287/* Sends 'packet' out 'ofport'.  If 'ofport' is a tunnel and that tunnel type
7288 * supports a notion of an OAM flag, sets it if 'oam' is true.
91d6cd12
AW
7289 * May modify 'packet'.
7290 * Returns 0 if successful, otherwise a positive errno value. */
7291int
2eb79142
JG
7292xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7293 struct dp_packet *packet)
91d6cd12 7294{
84f0f298 7295 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
91d6cd12 7296 struct xport *xport;
2eb79142
JG
7297 uint64_t ofpacts_stub[1024 / 8];
7298 struct ofpbuf ofpacts;
91d6cd12 7299 struct flow flow;
91d6cd12 7300
2eb79142 7301 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
91d6cd12 7302 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
cf62fa4c 7303 flow_extract(packet, &flow);
b5e7e61a 7304 flow.in_port.ofp_port = OFPP_NONE;
91d6cd12 7305
84f0f298 7306 xport = xport_lookup(xcfg, ofport);
91d6cd12 7307 if (!xport) {
02ea2703 7308 return EINVAL;
91d6cd12 7309 }
2eb79142
JG
7310
7311 if (oam) {
71f21279
BP
7312 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7313 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7314 &flag, &flag);
2eb79142
JG
7315 }
7316
7317 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
e491a67a 7318
1f4a8933
JR
7319 /* Actions here are not referring to anything versionable (flow tables or
7320 * groups) so we don't need to worry about the version here. */
7321 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7322 OVS_VERSION_MAX, &flow, NULL,
2eb79142 7323 ofpacts.data, ofpacts.size, packet);
9583bc14 7324}
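/* Editorial sketch (not part of the original file): a hypothetical caller,
 * assuming 'ofport' and a modifiable, heap-allocated 'packet' built
 * elsewhere.  A positive errno indicates failure. */
static void
example_send(const struct ofport_dpif *ofport, struct dp_packet *packet)
{
    int error = xlate_send_packet(ofport, false, packet);
    if (error) {
        VLOG_WARN("failed to send packet (%s)", ovs_strerror(error));
    }
}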
b256dc52 7325
901a517e 7326void
064799a1
JR
7327xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7328 ofp_port_t in_port, struct eth_addr dl_src,
7329 int vlan, bool is_grat_arp)
b256dc52 7330{
84f0f298 7331 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
b256dc52
JS
7332 struct xbridge *xbridge;
7333 struct xbundle *xbundle;
b256dc52 7334
84f0f298 7335 xbridge = xbridge_lookup(xcfg, ofproto);
b256dc52
JS
7336 if (!xbridge) {
7337 return;
7338 }
7339
2d9b49dd 7340 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
b256dc52
JS
7341 if (!xbundle) {
7342 return;
7343 }
7344
2d9b49dd 7345 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
b256dc52 7346}
bef503e8 7347
88186383
AZ
7348void
7349xlate_set_support(const struct ofproto_dpif *ofproto,
7350 const struct dpif_backer_support *support)
7351{
7352 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7353 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7354
7355 if (xbridge) {
7356 xbridge->support = *support;
7357 }
7358}