/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>

#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-ed-props.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation.
 *
 * The goal of limiting the depth of resubmits is to ensure that flow
 * translation eventually terminates. Only resubmits to the same table or an
 * earlier table count against the maximum depth. This is because resubmits to
 * strictly monotonically increasing table IDs will eventually terminate, since
 * any OpenFlow switch has a finite number of tables. OpenFlow tables are most
 * commonly traversed in numerically increasing order, so this limit has little
 * effect on conventionally designed OpenFlow pipelines.
 *
 * Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64
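
/* Example (illustrative, not in the original comment): a resubmit from table 5
 * back to table 5, or to table 3, counts against MAX_DEPTH, while a resubmit
 * from table 5 to table 6 does not, because a chain of strictly increasing
 * resubmits is bounded by the number of tables and terminates on its own. */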

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    uint16_t qinq_ethtype;         /* Ethertype of dot1q-tunnel interface,
                                    * either 0x8100 or 0x88a8. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    unsigned long *cvlans;         /* Bitmap of allowed customer VLANs,
                                    * NULL if all VLANs are allowed. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
    bool protected;                /* Protected port mode. */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */
    enum netdev_pt_mode pt_mode;     /* packet_type handling. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received. This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior. Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop. This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    struct in6_addr orig_tunnel_ipv6_dst;

    /* Stack for the push and pop actions. See comment above nx_stack_push()
     * in nx-match.c for info on how the stack is stored. */
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Flow translation populates this with wildcards relevant in translation.
     * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
     * null, this is a pointer to a temporary buffer. */
    struct flow_wildcards *wc;

    /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
     * this is the same pointer. When 'xin->odp_actions' is null, this points
     * to a scratch ofpbuf. This allows code to add actions to
     * 'ctx->odp_actions' without worrying about whether the caller really
     * wants actions. */
    struct ofpbuf *odp_actions;

    /* Statistics maintained by xlate_table_action().
     *
     * These statistics limit the amount of work that a single flow
     * translation can perform. The goal of the first of these, 'depth', is
     * primarily to prevent translation from performing an infinite amount of
     * work. It counts the current depth of nested "resubmit"s (and a few
     * other activities); when a resubmit returns, it decreases. Resubmits to
     * tables in strictly monotonically increasing order don't contribute to
     * 'depth' because they cannot cause a flow translation to take an infinite
     * amount of time (because the number of tables is finite). Translation
     * aborts when 'depth' exceeds MAX_DEPTH.
     *
     * 'resubmits', on the other hand, prevents flow translation from
     * performing an extraordinarily large, while still finite, amount of work.
     * It counts the total number of resubmits (and a few other activities)
     * that have been executed. Returning from a resubmit does not affect this
     * counter. Thus, this limits the amount of work that a particular
     * translation can perform. Translation aborts when 'resubmits' exceeds
     * MAX_RESUBMITS (which is much larger than MAX_DEPTH).
     */
    int depth;                  /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */
    bool in_packet_out;         /* Currently translating a packet_out msg, if
                                 * true. */
    bool pending_encap;         /* True when waiting to commit a pending
                                 * encap action. */
    struct ofpbuf *encap_data;  /* May contain a pointer to an ofpbuf with
                                 * context for the datapath encap action. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
    bool exit;                  /* No further actions should be processed. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
    int mirror_snaplen;         /* Max size of a mirror packet in bytes. */

    /* Freezing Translation
     * ====================
     *
     * At some point during translation, the code may recognize the need to
     * halt and checkpoint the translation in a way that it can be restarted
     * later. We call the checkpointing process "freezing" and the restarting
     * process "thawing".
     *
     * The use cases for freezing are:
     *
     *     - "Recirculation", where the translation process discovers that it
     *       doesn't have enough information to complete translation without
     *       actually executing the actions that have already been translated,
     *       which provides the additionally needed information. In these
     *       situations, the translation process freezes and assigns the
     *       frozen data a unique "recirculation ID", which it associates with
     *       the data in a table in userspace (see ofproto-dpif-rid.h). It
     *       also adds an OVS_ACTION_ATTR_RECIRC action specifying that ID to
     *       the datapath actions. When a packet hits that action, the
     *       datapath looks its flow up again using the ID. If there's a miss,
     *       it comes back to userspace, which finds the recirculation table
     *       entry for the ID, thaws the associated frozen data, and continues
     *       translation from that point given the additional information that
     *       is now known.
     *
     *       The archetypal example is MPLS. As MPLS is implemented in
     *       OpenFlow, the protocol that follows the last MPLS label becomes
     *       known only when that label is popped by an OpenFlow action. That
     *       means that Open vSwitch can't extract the headers beyond the MPLS
     *       labels until the pop action is executed. Thus, at that point
     *       translation uses the recirculation process to extract the headers
     *       beyond the MPLS labels.
     *
     *       (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
     *       output to bonds. OVS pre-populates all the datapath flows for bond
     *       output in the datapath, though, which means that the elaborate
     *       process of coming back to userspace for a second round of
     *       translation isn't needed, and so bonds don't follow the above
     *       process.)
     *
     *     - "Continuation". A continuation is a way for an OpenFlow controller
     *       to interpose on a packet's traversal of the OpenFlow tables. When
     *       the translation process encounters a "controller" action with the
     *       "pause" flag, it freezes translation, serializes the frozen data,
     *       and sends it to an OpenFlow controller. The controller then
     *       examines and possibly modifies the frozen data and eventually
     *       sends it back to the switch, which thaws it and continues
     *       translation.
     *
     * The main problem of freezing translation is preserving state, so that
     * when the translation is thawed later it resumes from where it left off,
     * without disruption. In particular, actions must be preserved as follows:
     *
     *     - If we're freezing because an action needed more information, the
     *       action that prompted it.
     *
     *     - Any actions remaining to be translated within the current flow.
     *
     *     - If translation was frozen within a NXAST_RESUBMIT, then any
     *       actions following the resubmit action. Resubmit actions can be
     *       nested, so this has to go all the way up the control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
     *
     *     - The table ID and cookie of the flow being translated at each level
     *       of the control stack, because these can become visible through
     *       OFPAT_CONTROLLER actions (and other ways).
     *
     * Translation allows for the control of this state preservation via these
     * members. When a need to freeze translation is identified, the
     * translation process:
     *
     * 1. Sets 'freezing' to true.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
     *    frozen_actions.header to the action to make it easy to find it later.
     *    This action holds the current table ID and cookie so that they can be
     *    restored during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions following
     *    it within the same flow to 'frozen_actions', so that they can be
     *    executed during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining. Each of these notices
     *    that we're exiting and freezing and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
     *    followed by any actions that were yet unprocessed.
     *
     * If we're freezing because of recirculation, the caller generates a
     * recirculation ID and associates all the state produced by this process
     * with it. For post-recirculation upcall translation, the caller passes it
     * back in for the new translation to execute. The process yielded a set of
     * ofpacts that can be translated directly, so it is not much of a special
     * case at that point.
     */
    bool freezing;
    bool recirc_update_dp_hash;    /* Generated recirculation will be preceded
                                    * by datapath HASH action to get an updated
                                    * dp_hash after recirculation. */
    uint32_t dp_hash_alg;
    uint32_t dp_hash_basis;
    struct ofpbuf frozen_actions;
    const struct ofpact_controller *pause;
    struct flow *paused_flow;

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* True if conntrack has been performed on this packet during processing
     * on the current bridge. This is used to determine whether conntrack
     * state from the datapath should be honored after thawing. */
    bool conntracked;

    /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
    struct ofpact_nat *ct_nat_action;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */

    enum xlate_error error;     /* Translation failed. */
};

/* Structure to track VLAN manipulation. */
struct xvlan_single {
    uint16_t tpid;
    uint16_t vid;
    uint16_t pcp;
};

struct xvlan {
    struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
};
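
/* Illustrative example (not in the original source): xvlan_extract() would
 * represent a QinQ packet carrying an 802.1ad S-tag with VID 100 outside an
 * 802.1Q C-tag with VID 200 roughly as:
 *
 *     xvlan.v[0] = (struct xvlan_single) { .tpid = 0x88a8, .vid = 100 };
 *     xvlan.v[1] = (struct xvlan_single) { .tpid = 0x8100, .vid = 200 };
 */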

const char *xlate_strerror(enum xlate_error error)
{
    switch (error) {
    case XLATE_OK:
        return "OK";
    case XLATE_BRIDGE_NOT_FOUND:
        return "Bridge not found";
    case XLATE_RECURSION_TOO_DEEP:
        return "Recursion too deep";
    case XLATE_TOO_MANY_RESUBMITS:
        return "Too many resubmits";
    case XLATE_STACK_TOO_DEEP:
        return "Stack too deep";
    case XLATE_NO_RECIRCULATION_CONTEXT:
        return "No recirculation context";
    case XLATE_RECIRCULATION_CONFLICT:
        return "Recirculation conflict";
    case XLATE_TOO_MANY_MPLS_LABELS:
        return "Too many MPLS labels";
    case XLATE_INVALID_TUNNEL_METADATA:
        return "Invalid tunnel metadata";
    }
    return "Unknown error";
}
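
/* Illustrative use (not in the original source): callers render translation
 * error codes for logging, e.g.:
 *
 *     VLOG_WARN("%s: xlate failed (%s)", name, xlate_strerror(error));
 */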

static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);

static void
apply_nested_clone_actions(struct xlate_ctx *ctx, const struct xport *in_dev,
                           struct xport *out_dev);

static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->freezing = true;
}

static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
                                  uint32_t basis)
{
    ctx->exit = true;
    ctx->freezing = true;
    ctx->recirc_update_dp_hash = true;
    ctx->dp_hash_alg = type;
    ctx->dp_hash_basis = basis;
}

static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
    return !ctx->frozen_actions.size;
}

static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
    if (ctx->freezing) {
        ctx->freezing = false;
        ctx->recirc_update_dp_hash = false;
        ofpbuf_clear(&ctx->frozen_actions);
        ctx->frozen_actions.header = NULL;
    }
}

static void finish_freezing(struct xlate_ctx *ctx);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing). It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
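
/* Illustrative note (not in the original source): entries are populated from
 * the per-queue QoS configuration in xlate_ofport_set() below, where the
 * configured DSCP value is shifted into the upper six bits of the IP TOS
 * byte: (qdscp_list[i].dscp << 2) & IP_DSCP_MASK. */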

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;

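/* Illustrative sketch (not in the original source): handler and revalidator
 * threads read the current configuration with a lockless RCU load, e.g.:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * as xlate_lookup_ofproto_() and xlate_txn_commit() do below. */
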
typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
                                   struct xlate_ctx *, bool);

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *, bool);
static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                                struct xlate_ctx *, bool);
static void xlate_normal(struct xlate_ctx *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss, bool with_ct_orig,
                               bool is_last_action, xlate_actions_handler *);

static bool input_vid_is_valid(const struct xlate_ctx *,
                               uint16_t vid, struct xbundle *);
static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
static void xvlan_pop(struct xvlan *src);
static void xvlan_push_uninit(struct xvlan *src);
static void xvlan_extract(const struct flow *, struct xvlan *);
static void xvlan_put(struct flow *, const struct xvlan *);
static void xvlan_input_translate(const struct xbundle *,
                                  const struct xvlan *in,
                                  struct xvlan *xvlan);
static void xvlan_output_translate(const struct xbundle *,
                                   const struct xvlan *xvlan,
                                   struct xvlan *out);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          const struct xvlan *);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t hash_alg;    /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr,
                                  bool is_last_action);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
                                              const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode,
                              uint16_t qinq_ethtype, int vlan,
                              unsigned long *trunks, unsigned long *cvlans,
                              bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);
\f
/* Tracing helpers. */

/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
 * the list of nodes maintained in ctx->xin. The new node has type 'type' and
 * its text is created from 'format' by treating it as a printf format string.
 * Returns the list of nodes embedded within the new trace node; ordinarily,
 * the caller can ignore this, but it is useful if the caller needs to nest
 * more trace nodes within the new node.
 *
 * If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
             const char *format, ...)
{
    struct ovs_list *subtrace = NULL;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        va_list args;
        va_start(args, format);
        char *text = xvasprintf(format, args);
        subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
        va_end(args);
        free(text);
    }
    return subtrace;
}
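
/* Illustrative call (not in the original source):
 *
 *     xlate_report(ctx, OFT_DETAIL, "resubmitting to table %"PRIu8, table_id);
 */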

/* This is like xlate_report() for errors that are serious enough that we
 * should log them even if we are not tracing. */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
    } else {
        ds_put_cstr(&s, " while processing ");
        flow_format(&s, &ctx->base_flow, NULL);
        ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
        VLOG_WARN("%s", ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* This is like xlate_report() for messages that should be logged at debug
 * level (even if we are not tracing) because they can be valuable for
 * debugging. */
static void OVS_PRINTF_FORMAT(3, 4)
xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                   const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
    } else {
        VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
 * trace, whose text is 'title' followed by a formatted version of the
 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                     const char *title,
                     const struct ofpact *ofpacts, size_t ofpacts_len)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        ds_put_format(&s, "%s: ", title);
        ofpacts_format(ofpacts, ofpacts_len, NULL, &s);
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
        ds_destroy(&s);
    }
}

/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
 * trace, whose message is a formatted version of the OpenFlow action set.
 * 'verb' should be "was" or "is", depending on whether the action set reported
 * is the new action set or the old one.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ofpbuf action_list;
        ofpbuf_init(&action_list, 0);
        ofpacts_execute_action_set(&action_list, &ctx->action_set);
        if (action_list.size) {
            struct ds s = DS_EMPTY_INITIALIZER;
            ofpacts_format(action_list.data, action_list.size, NULL, &s);
            xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
                         verb, ds_cstr(&s));
            ds_destroy(&s);
        } else {
            xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
        }
        ofpbuf_uninit(&action_list);
    }
}


/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
 * OpenFlow table 'table_id') to the trace and makes this node the parent for
 * future trace nodes. The caller should save ctx->xin->trace before calling
 * this function, then after tracing all of the activities under the table,
 * restore its previous value.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
                   uint8_t table_id)
{
    if (OVS_LIKELY(!ctx->xin->trace)) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    ds_put_format(&s, "%2d. ", table_id);
    if (rule == ctx->xin->ofproto->miss_rule) {
        ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
    } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
        ds_put_cstr(&s, "No match.");
    } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
        ds_put_cstr(&s, "Packets are IP fragments and "
                    "the fragment handling mode is \"drop\".");
    } else {
        minimatch_format(&rule->up.cr.match,
                         ofproto_get_tun_tab(&ctx->xin->ofproto->up),
                         NULL, &s, OFP_DEFAULT_PRIORITY);
        if (ds_last(&s) != ' ') {
            ds_put_cstr(&s, ", ");
        }
        ds_put_format(&s, "priority %d", rule->up.cr.priority);
        if (rule->up.flow_cookie) {
            ds_put_format(&s, ", cookie %#"PRIx64,
                          ntohll(rule->up.flow_cookie));
        }
    }
    ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
                                      ds_cstr(&s))->subs;
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
 * reporting the value of subfield 'sf'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_subfield(const struct xlate_ctx *ctx,
                      const struct mf_subfield *sf)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        mf_format_subfield(sf, &s);
        ds_put_cstr(&s, " is now ");

        if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
            union mf_value value;
            mf_get_value(sf->field, &ctx->xin->flow, &value);
            mf_format(sf->field, &value, NULL, NULL, &s);
        } else {
            union mf_subvalue cst;
            mf_read_subfield(sf, &ctx->xin->flow, &cst);
            ds_put_hex(&s, &cst, sizeof cst);
        }

        xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));

        ds_destroy(&s);
    }
}
\f
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    ovs_list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    ovs_list_init(&xbundle->xports);
    ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->support = *support;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
                  int vlan, unsigned long *trunks, unsigned long *cvlans,
                  bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable, bool protected)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->qinq_ethtype = qinq_ethtype;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->cvlans = cvlans;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;
    xbundle->protected = protected;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp,
                int stp_port_no, const struct rstp_port *rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->pt_mode = netdev_get_pt_mode(netdev);
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu, xbridge->has_in_band,
                      &xbridge->support);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
                      xbundle->vlan, xbundle->trunks, xbundle->cvlans,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable, xbundle->protected);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);
    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}


static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, support);
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    netflow_unref(xbridge->netflow);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode,
                 uint16_t qinq_ethtype, int vlan,
                 unsigned long *trunks, unsigned long *cvlans,
                 bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable, bool protected)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
                      use_priority_tags, bond, lacp, floodable, protected);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    ovs_list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 const struct lldp *lldp, struct ofport_dpif *peer,
                 int stp_port_no, const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
                    stp_port_no, rstp_port, config, state, is_tunnel,
                    may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    lldp_unref(xport->lldp);
    free(xport);
}

void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}

static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
                      ofp_port_t *ofp_in_port, const struct xport **xportp)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    const struct xport *xport;

    xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));
    if (OVS_UNLIKELY(!xport)) {
        return NULL;
    }
    *xportp = xport;
    if (ofp_in_port) {
        *ofp_in_port = xport->ofp_port;
    }
    return xport->xbridge->ofproto;
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
                     ofp_port_t *ofp_in_port)
{
    const struct xport *xport;

    return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * optionally populates 'ofproto' with the ofproto_dpif, 'ofp_in_port' with the
 * OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 * handles for those protocols if they're enabled. The caller may use the
 * returned pointers until quiescing; for longer-term use, additional
 * references must be taken.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated
 * ofproto. */
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
             struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
             struct dpif_sflow **sflow, struct netflow **netflow,
             ofp_port_t *ofp_in_port)
{
    struct ofproto_dpif *ofproto;
    const struct xport *xport;

    ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);

    if (!ofproto) {
        return ENODEV;
    }

    if (ofprotop) {
        *ofprotop = ofproto;
    }

    if (ipfix) {
        *ipfix = xport ? xport->xbridge->ipfix : NULL;
    }

    if (sflow) {
        *sflow = xport ? xport->xbridge->sflow : NULL;
    }

    if (netflow) {
        *netflow = xport ? xport->xbridge->netflow : NULL;
    }

    return 0;
}

static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbridge *
xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
{
    struct xbridge *xbridge;

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
    struct hmap *xbundles;
    struct xbundle *xbundle;

    if (!ofbundle || !xcfg) {
        return NULL;
    }

    xbundles = &xcfg->xbundles;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
    struct hmap *xports;
    struct xport *xport;

    if (!ofport || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

40085e56
EJ
1506static struct stp_port *
1507xport_get_stp_port(const struct xport *xport)
1508{
92cf817b 1509 return xport->xbridge->stp && xport->stp_port_no != -1
40085e56
EJ
1510 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1511 : NULL;
1512}
9d189a50 1513
0d1cee12 1514static bool
9d189a50
EJ
1515xport_stp_learn_state(const struct xport *xport)
1516{
40085e56 1517 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1518 return sp
1519 ? stp_learn_in_state(stp_port_get_state(sp))
1520 : true;
9d189a50
EJ
1521}
1522
1523static bool
1524xport_stp_forward_state(const struct xport *xport)
1525{
40085e56 1526 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1527 return sp
1528 ? stp_forward_in_state(stp_port_get_state(sp))
1529 : true;
9d189a50
EJ
1530}
1531
0d1cee12 1532static bool
bacdb85a 1533xport_stp_should_forward_bpdu(const struct xport *xport)
0d1cee12
K
1534{
1535 struct stp_port *sp = xport_get_stp_port(xport);
bacdb85a 1536 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
0d1cee12
K
1537}
1538
9d189a50
EJ
1539/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
 1540 * were used to make the determination. */
1541static bool
1542stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1543{
bbbca389 1544 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
9d189a50 1545 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
bbbca389 1546 return is_stp(flow);
9d189a50
EJ
1547}
1548
1549static void
cf62fa4c 1550stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9d189a50 1551{
40085e56 1552 struct stp_port *sp = xport_get_stp_port(xport);
cf62fa4c
PS
1553 struct dp_packet payload = *packet;
1554 struct eth_header *eth = dp_packet_data(&payload);
9d189a50
EJ
1555
1556 /* Sink packets on ports that have STP disabled when the bridge has
1557 * STP enabled. */
1558 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1559 return;
1560 }
1561
1562 /* Trim off padding on payload. */
cf62fa4c
PS
1563 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1564 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9d189a50
EJ
1565 }
1566
cf62fa4c
PS
1567 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1568 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
9d189a50
EJ
1569 }
1570}
1571
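/* Editorial note with a minimal sketch (toy names, not from this file):
 * BPDUs are LLC-encapsulated, so eth->eth_type above holds an 802.3
 * payload length rather than an EtherType.  The trim therefore computes
 * the true frame size as ETH_HEADER_LEN plus that length, discarding any
 * Ethernet minimum-size padding beyond it. */
#include <assert.h>
#include <stdint.h>

enum { TOY_ETH_HEADER_LEN = 14 };

static uint32_t
toy_llc_frame_len(uint16_t length_field)     /* host byte order */
{
    return TOY_ETH_HEADER_LEN + length_field;
}

static void
toy_llc_trim_example(void)
{
    /* A 38-byte LLC payload arrives padded to the 60-byte Ethernet
     * minimum; only the first 14 + 38 = 52 bytes are the real BPDU. */
    assert(toy_llc_frame_len(38) == 52);
}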
f025bcb7
JR
1572static enum rstp_state
1573xport_get_rstp_port_state(const struct xport *xport)
9efd308e 1574{
f025bcb7
JR
1575 return xport->rstp_port
1576 ? rstp_port_get_state(xport->rstp_port)
1577 : RSTP_DISABLED;
9efd308e
DV
1578}
1579
1580static bool
1581xport_rstp_learn_state(const struct xport *xport)
1582{
4b5f1996
DV
1583 return xport->xbridge->rstp && xport->rstp_port
1584 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1585 : true;
9efd308e
DV
1586}
1587
1588static bool
1589xport_rstp_forward_state(const struct xport *xport)
1590{
4b5f1996
DV
1591 return xport->xbridge->rstp && xport->rstp_port
1592 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1593 : true;
9efd308e
DV
1594}
1595
1596static bool
1597xport_rstp_should_manage_bpdu(const struct xport *xport)
1598{
f025bcb7 1599 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
9efd308e
DV
1600}
1601
1602static void
cf62fa4c 1603rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9efd308e 1604{
cf62fa4c
PS
1605 struct dp_packet payload = *packet;
1606 struct eth_header *eth = dp_packet_data(&payload);
9efd308e 1607
f025bcb7
JR
1608 /* Sink packets on ports that have no RSTP. */
1609 if (!xport->rstp_port) {
9efd308e
DV
1610 return;
1611 }
1612
1613 /* Trim off padding on payload. */
cf62fa4c
PS
1614 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1615 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9efd308e
DV
1616 }
1617
cf62fa4c
PS
1618 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1619 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1620 dp_packet_size(&payload));
9efd308e
DV
1621 }
1622}
1623
46c88433
EJ
1624static struct xport *
1625get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1626{
1627 struct xport *xport;
1628
1629 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1630 &xbridge->xports) {
1631 if (xport->ofp_port == ofp_port) {
1632 return xport;
1633 }
1634 }
1635 return NULL;
1636}
1637
1638static odp_port_t
1639ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1640{
1641 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1642 return xport ? xport->odp_port : ODPP_NONE;
1643}
1644
dd8cd4b4
SH
1645static bool
1646odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1647{
086fa873
BP
1648 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1649 return xport && xport->may_enable;
dd8cd4b4
SH
1650}
1651
1e684d7d 1652static struct ofputil_bucket *
dd8cd4b4
SH
1653group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1654 int depth);
1655
1656static bool
1657group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1658{
1659 struct group_dpif *group;
dd8cd4b4 1660
5d08a275 1661 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 1662 ctx->xin->tables_version, false);
db88b35c 1663 if (group) {
76973237 1664 return group_first_live_bucket(ctx, group, depth) != NULL;
dc25893e 1665 }
dd8cd4b4 1666
dc25893e 1667 return false;
dd8cd4b4
SH
1668}
1669
1670#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1671
1672static bool
1673bucket_is_alive(const struct xlate_ctx *ctx,
1e684d7d 1674 struct ofputil_bucket *bucket, int depth)
dd8cd4b4
SH
1675{
1676 if (depth >= MAX_LIVENESS_RECURSION) {
2d9b49dd
BP
1677 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1678 MAX_LIVENESS_RECURSION);
dd8cd4b4
SH
1679 return false;
1680 }
1681
fdb1999b
AZ
1682 return (!ofputil_bucket_has_liveness(bucket)
1683 || (bucket->watch_port != OFPP_ANY
1684 && odp_port_is_alive(ctx, bucket->watch_port))
1685 || (bucket->watch_group != OFPG_ANY
1686 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
dd8cd4b4
SH
1687}
1688
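/* Illustrative sketch over toy types (added; these are not the structures
 * used above): the fast-failover liveness rule is "no liveness
 * requirement, or watch port up, or watch group itself live", with
 * recursion capped just as bucket_is_alive() caps it via
 * MAX_LIVENESS_RECURSION. */
#include <stdbool.h>
#include <stddef.h>

struct toy_group;

struct toy_bucket {
    bool has_liveness;                    /* watch port/group configured? */
    bool watch_port_up;                   /* cf. odp_port_is_alive() */
    const struct toy_group *watch_group;  /* NULL if none */
};

struct toy_group {
    const struct toy_bucket *buckets;
    size_t n_buckets;
};

static bool toy_group_is_alive(const struct toy_group *, int depth);

static bool
toy_bucket_is_alive(const struct toy_bucket *b, int depth)
{
    if (depth >= 128) {                   /* cf. MAX_LIVENESS_RECURSION */
        return false;
    }
    return !b->has_liveness
           || b->watch_port_up
           || (b->watch_group
               && toy_group_is_alive(b->watch_group, depth + 1));
}

static bool
toy_group_is_alive(const struct toy_group *g, int depth)
{
    for (size_t i = 0; i < g->n_buckets; i++) {
        if (toy_bucket_is_alive(&g->buckets[i], depth)) {
            return true;
        }
    }
    return false;
}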
1e684d7d 1689static struct ofputil_bucket *
dd8cd4b4
SH
1690group_first_live_bucket(const struct xlate_ctx *ctx,
1691 const struct group_dpif *group, int depth)
1692{
1693 struct ofputil_bucket *bucket;
07a3cd5c 1694 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
dd8cd4b4
SH
1695 if (bucket_is_alive(ctx, bucket, depth)) {
1696 return bucket;
1697 }
1698 }
1699
1700 return NULL;
1701}
1702
1e684d7d 1703static struct ofputil_bucket *
fe7e5749
SH
1704group_best_live_bucket(const struct xlate_ctx *ctx,
1705 const struct group_dpif *group,
1706 uint32_t basis)
1707{
1e684d7d 1708 struct ofputil_bucket *best_bucket = NULL;
fe7e5749 1709 uint32_t best_score = 0;
fe7e5749 1710
1e684d7d 1711 struct ofputil_bucket *bucket;
07a3cd5c 1712 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
fe7e5749 1713 if (bucket_is_alive(ctx, bucket, 0)) {
c09cb861
LS
1714 uint32_t score =
1715 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
fe7e5749
SH
1716 if (score >= best_score) {
1717 best_bucket = bucket;
1718 best_score = score;
1719 }
1720 }
fe7e5749
SH
1721 }
1722
1723 return best_bucket;
1724}
1725
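/* Added sketch (toy hash standing in for hash_int(); not the original
 * code): group_best_live_bucket() implements highest-random-weight
 * selection.  Each live bucket scores 16 bits of a per-flow seeded hash
 * scaled by its weight, and the largest score wins ('>=' lets later
 * buckets win ties), so buckets attract flows roughly in proportion to
 * their configured weight. */
#include <stdint.h>

static uint32_t
toy_bucket_score(uint32_t bucket_id, uint32_t basis, uint16_t weight)
{
    uint32_t h = (bucket_id ^ basis) * 2654435761u;  /* stand-in hash */
    return (h & 0xffff) * weight;        /* at most 0xffff * weight */
}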
9583bc14 1726static bool
46c88433 1727xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
9583bc14
EJ
1728{
1729 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1730 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1731}
1732
fed8962a
EG
1733static bool
1734xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1735{
1736 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1737}
1738
9583bc14 1739static bool
f0fb825a 1740xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
46c88433 1741{
f0fb825a
EG
1742 switch (xbundle->vlan_mode) {
1743 case PORT_VLAN_ACCESS:
1744 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
1745
1746 case PORT_VLAN_TRUNK:
1747 case PORT_VLAN_NATIVE_UNTAGGED:
1748 case PORT_VLAN_NATIVE_TAGGED:
1749 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
1750
fed8962a
EG
1751 case PORT_VLAN_DOT1Q_TUNNEL:
1752 return xvlan->v[0].vid == xbundle->vlan &&
1753 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
1754
f0fb825a
EG
1755 default:
1756 OVS_NOT_REACHED();
1757 }
46c88433
EJ
1758}
1759
1760static mirror_mask_t
1761xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1762{
1763 return xbundle != &ofpp_none_bundle
1764 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
1765 : 0;
1766}
1767
1768static mirror_mask_t
1769xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 1770{
46c88433
EJ
1771 return xbundle != &ofpp_none_bundle
1772 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
1773 : 0;
9583bc14
EJ
1774}
1775
46c88433
EJ
1776static mirror_mask_t
1777xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 1778{
46c88433
EJ
1779 return xbundle != &ofpp_none_bundle
1780 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
1781 : 0;
1782}
1783
1784static struct xbundle *
2d9b49dd
BP
1785lookup_input_bundle__(const struct xbridge *xbridge,
1786 ofp_port_t in_port, struct xport **in_xportp)
46c88433
EJ
1787{
1788 struct xport *xport;
9583bc14
EJ
1789
1790 /* Find the port and bundle for the received packet. */
46c88433
EJ
1791 xport = get_ofp_port(xbridge, in_port);
1792 if (in_xportp) {
1793 *in_xportp = xport;
9583bc14 1794 }
46c88433
EJ
1795 if (xport && xport->xbundle) {
1796 return xport->xbundle;
9583bc14
EJ
1797 }
1798
6362203b
YT
1799 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
1800 * which a controller may use as the ingress port for traffic that
1801 * it is sourcing. */
1802 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
9583bc14
EJ
1803 return &ofpp_none_bundle;
1804 }
2d9b49dd
BP
1805 return NULL;
1806}
9583bc14 1807
2d9b49dd
BP
1808static struct xbundle *
1809lookup_input_bundle(const struct xlate_ctx *ctx,
1810 ofp_port_t in_port, struct xport **in_xportp)
1811{
1812 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
1813 in_port, in_xportp);
1814 if (!xbundle) {
1815 /* Odd. A few possible reasons here:
1816 *
1817 * - We deleted a port but there are still a few packets queued up
1818 * from it.
1819 *
1820 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
1821 * we don't know about.
1822 *
1823 * - The ofproto client didn't configure the port as part of a bundle.
1824 * This is particularly likely to happen if a packet was received on
1825 * the port after it was created, but before the client had a chance
1826 * to configure its bundle.
1827 */
94783c7c 1828 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2d9b49dd 1829 in_port);
9583bc14 1830 }
2d9b49dd 1831 return xbundle;
9583bc14
EJ
1832}
1833
faa624b4
BP
1834/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
 1835 * given that the packet is ingressing or egressing on 'xbundle', which has
1836 * or egress (as appropriate) mirrors 'mirrors'. */
9583bc14 1837static void
7efbc3b7
BP
1838mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
1839 mirror_mask_t mirrors)
9583bc14 1840{
f0fb825a
EG
1841 struct xvlan in_xvlan;
1842 struct xvlan xvlan;
1843
faa624b4
BP
1844 /* Figure out what VLAN the packet is in (because mirrors can select
1845 * packets on basis of VLAN). */
f0fb825a
EG
1846 xvlan_extract(&ctx->xin->flow, &in_xvlan);
1847 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
9583bc14
EJ
1848 return;
1849 }
f0fb825a 1850 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
9583bc14 1851
7efbc3b7 1852 const struct xbridge *xbridge = ctx->xbridge;
9583bc14 1853
7efbc3b7
BP
1854 /* Don't mirror to destinations that we've already mirrored to. */
1855 mirrors &= ~ctx->mirrors;
9583bc14
EJ
1856 if (!mirrors) {
1857 return;
1858 }
1859
7efbc3b7
BP
1860 if (ctx->xin->resubmit_stats) {
1861 mirror_update_stats(xbridge->mbridge, mirrors,
1862 ctx->xin->resubmit_stats->n_packets,
1863 ctx->xin->resubmit_stats->n_bytes);
1864 }
1865 if (ctx->xin->xcache) {
1866 struct xc_entry *entry;
1867
1868 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
901a517e
JR
1869 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
1870 entry->mirror.mirrors = mirrors;
7efbc3b7 1871 }
9583bc14 1872
faa624b4
BP
1873 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate as long as
1874 * some candidates remain. */
9583bc14 1875 while (mirrors) {
7efbc3b7 1876 const unsigned long *vlans;
ec7ceaed
EJ
1877 mirror_mask_t dup_mirrors;
1878 struct ofbundle *out;
ec7ceaed 1879 int out_vlan;
1356dbd1 1880 int snaplen;
ec7ceaed 1881
faa624b4 1882 /* Get the details of the mirror represented by the rightmost 1-bit. */
7efbc3b7 1883 bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
1356dbd1
WT
1884 &vlans, &dup_mirrors,
1885 &out, &snaplen, &out_vlan);
ec7ceaed
EJ
1886 ovs_assert(has_mirror);
1887
1356dbd1 1888
faa624b4
BP
1889 /* If this mirror selects on the basis of VLAN, and it does not select
1890 * 'vlan', then discard this mirror and go on to the next one. */
ec7ceaed 1891 if (vlans) {
f0fb825a 1892 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
9583bc14 1893 }
f0fb825a 1894 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
9583bc14
EJ
1895 mirrors = zero_rightmost_1bit(mirrors);
1896 continue;
1897 }
1898
faa624b4
BP
1899 /* Record the mirror, and the mirrors that output to the same
1900 * destination, so that we don't mirror to them again. This must be
1901 * done now to ensure that output_normal(), below, doesn't recursively
1902 * output to the same mirrors. */
3d6151f3 1903 ctx->mirrors |= dup_mirrors;
1356dbd1 1904 ctx->mirror_snaplen = snaplen;
faa624b4
BP
1905
1906 /* Send the packet to the mirror. */
ec7ceaed 1907 if (out) {
84f0f298
RW
1908 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1909 struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
46c88433 1910 if (out_xbundle) {
f0fb825a 1911 output_normal(ctx, out_xbundle, &xvlan);
46c88433 1912 }
f0fb825a 1913 } else if (xvlan.v[0].vid != out_vlan
7efbc3b7 1914 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
71f21279 1915 struct xbundle *xb;
f0fb825a 1916 uint16_t old_vid = xvlan.v[0].vid;
9583bc14 1917
f0fb825a 1918 xvlan.v[0].vid = out_vlan;
71f21279
BP
1919 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
1920 if (xbundle_includes_vlan(xb, &xvlan)
1921 && !xbundle_mirror_out(xbridge, xb)) {
1922 output_normal(ctx, xb, &xvlan);
9583bc14
EJ
1923 }
1924 }
f0fb825a 1925 xvlan.v[0].vid = old_vid;
9583bc14 1926 }
faa624b4
BP
1927
1928 /* output_normal() could have recursively output (to different
1929 * mirrors), so make sure that we don't send duplicates. */
1930 mirrors &= ~ctx->mirrors;
1356dbd1 1931 ctx->mirror_snaplen = 0;
9583bc14
EJ
1932 }
1933}
1934
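/* Minimal standalone sketch (added; uses compiler builtins instead of
 * raw_ctz()/zero_rightmost_1bit() from OVS's util.h): the loop above
 * walks the mirror bit-mask one mirror at a time by repeatedly taking and
 * then clearing the rightmost 1-bit. */
#include <stdio.h>

static void
toy_walk_mirror_mask(unsigned int mirrors)   /* e.g. 0x15: mirrors 0,2,4 */
{
    while (mirrors) {
        int idx = __builtin_ctz(mirrors);    /* cf. raw_ctz() */
        printf("processing mirror %d\n", idx);
        mirrors &= mirrors - 1;              /* cf. zero_rightmost_1bit() */
    }
}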
7efbc3b7
BP
1935static void
1936mirror_ingress_packet(struct xlate_ctx *ctx)
1937{
1938 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
7efbc3b7 1939 struct xbundle *xbundle = lookup_input_bundle(
2d9b49dd 1940 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
7efbc3b7
BP
1941 if (xbundle) {
1942 mirror_packet(ctx, xbundle,
1943 xbundle_mirror_src(ctx->xbridge, xbundle));
1944 }
1945 }
1946}
1947
46c88433 1948/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2d9b49dd 1949 * If so, returns true. Otherwise, returns false.
9583bc14
EJ
1950 *
1951 * 'vid' should be the VID obtained from the 802.1Q header that was received as
1952 * part of a packet (specify 0 if there was no 802.1Q header), in the range
1953 * 0...4095. */
1954static bool
2d9b49dd
BP
1955input_vid_is_valid(const struct xlate_ctx *ctx,
1956 uint16_t vid, struct xbundle *in_xbundle)
9583bc14
EJ
1957{
1958 /* Allow any VID on the OFPP_NONE port. */
46c88433 1959 if (in_xbundle == &ofpp_none_bundle) {
9583bc14
EJ
1960 return true;
1961 }
1962
46c88433 1963 switch (in_xbundle->vlan_mode) {
9583bc14
EJ
1964 case PORT_VLAN_ACCESS:
1965 if (vid) {
2d9b49dd
BP
1966 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
1967 "packet received on port %s configured as VLAN "
fd13c6b5 1968 "%d access port", vid, in_xbundle->name,
2d9b49dd 1969 in_xbundle->vlan);
9583bc14
EJ
1970 return false;
1971 }
1972 return true;
1973
1974 case PORT_VLAN_NATIVE_UNTAGGED:
1975 case PORT_VLAN_NATIVE_TAGGED:
1976 if (!vid) {
1977 /* Port must always carry its native VLAN. */
1978 return true;
1979 }
1980 /* Fall through. */
1981 case PORT_VLAN_TRUNK:
f0fb825a 1982 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
2d9b49dd
BP
1983 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
1984 "received on port %s not configured for "
1985 "trunking VLAN %"PRIu16,
1986 vid, in_xbundle->name, vid);
9583bc14
EJ
1987 return false;
1988 }
1989 return true;
1990
fed8962a
EG
1991 case PORT_VLAN_DOT1Q_TUNNEL:
1992 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
1993 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
1994 "on dot1q-tunnel port %s that excludes this "
1995 "VLAN", vid, in_xbundle->name);
1996 return false;
1997 }
1998 return true;
1999
9583bc14 2000 default:
428b2edd 2001 OVS_NOT_REACHED();
9583bc14
EJ
2002 }
2003
2004}
2005
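/* Editorial summary of the admission rules implemented above (added note,
 * not in the original source):
 *
 *     vlan_mode            untagged (vid == 0)     tagged (vid != 0)
 *     ------------------   ---------------------   --------------------------
 *     access               accept                  reject
 *     trunk                accept iff trunked      accept iff 'vid' trunked
 *     native-(un)tagged    accept                  accept iff 'vid' trunked
 *     dot1q-tunnel         accept iff allowed      accept iff 'vid' in cvlans
 *
 * The OFPP_NONE pseudo-bundle accepts any VID unconditionally. */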
f0fb825a
EG
2006static void
2007xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2008{
2009 *dst = *src;
2010}
2011
2012static void
2013xvlan_pop(struct xvlan *src)
2014{
2015 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2016 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2017 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2018}
2019
fed8962a
EG
2020static void
2021xvlan_push_uninit(struct xvlan *src)
2022{
2023 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2024 memset(&src->v[0], 0, sizeof(src->v[0]));
2025}
2026
f0fb825a
EG
 2027/* Extracts VLAN information (headers) from 'flow'. */
2028static void
2029xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2030{
2031 int i;
2032 memset(xvlan, 0, sizeof(*xvlan));
2033 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2034 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2035 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2036 break;
2037 }
2038 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2039 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2040 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2041 }
2042}
2043
 2044/* Writes VLAN information (headers) back to 'flow'. */
2045static void
2046xvlan_put(struct flow *flow, const struct xvlan *xvlan)
2047{
2048 ovs_be16 tci;
2049 int i;
2050 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2051 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
2052 if (tci) {
2053 tci |= htons(VLAN_CFI);
2054 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2055 htons(xvlan->v[i].tpid) :
2056 htons(ETH_TYPE_VLAN_8021Q);
2057 }
2058 flow->vlans[i].tci = tci;
2059 }
2060}
2061
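/* Worked example (added note): for a QinQ packet carrying an outer
 * 802.1ad tag with VID 100, PCP 3 and an inner 802.1Q tag with VID 200,
 * xvlan_extract() yields
 *
 *     xvlan.v[0] = { .tpid = 0x88a8, .vid = 100, .pcp = 0x6000 }
 *     xvlan.v[1] = { .tpid = 0x8100, .vid = 200, .pcp = 0 }
 *
 * Note that the PCP bits stay in their in-TCI position (masked with
 * VLAN_PCP_MASK) rather than being shifted down to 0..7, which is why
 * xvlan_put() can OR them straight back into the TCI.  xvlan_put() also
 * defaults a zero tpid to ETH_TYPE_VLAN_8021Q and sets VLAN_CFI on any
 * nonzero tag. */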
2062/* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2063 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2064 * returns the VLANs of the packet during bridge internal processing. */
2065static void
2066xvlan_input_translate(const struct xbundle *in_xbundle,
2067 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2068{
2069
2070 switch (in_xbundle->vlan_mode) {
2071 case PORT_VLAN_ACCESS:
2072 memset(xvlan, 0, sizeof(*xvlan));
2073 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2074 ETH_TYPE_VLAN_8021Q;
2075 xvlan->v[0].vid = in_xbundle->vlan;
2076 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2077 break;
2078
2079 case PORT_VLAN_TRUNK:
2080 xvlan_copy(xvlan, in_xvlan);
2081 break;
2082
2083 case PORT_VLAN_NATIVE_UNTAGGED:
2084 case PORT_VLAN_NATIVE_TAGGED:
2085 xvlan_copy(xvlan, in_xvlan);
2086 if (!in_xvlan->v[0].vid) {
2087 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2088 ETH_TYPE_VLAN_8021Q;
2089 xvlan->v[0].vid = in_xbundle->vlan;
2090 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2091 }
2092 break;
2093
fed8962a
EG
2094 case PORT_VLAN_DOT1Q_TUNNEL:
2095 xvlan_copy(xvlan, in_xvlan);
2096 xvlan_push_uninit(xvlan);
2097 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2098 xvlan->v[0].vid = in_xbundle->vlan;
2099 xvlan->v[0].pcp = 0;
2100 break;
2101
f0fb825a
EG
2102 default:
2103 OVS_NOT_REACHED();
2104 }
2105}
2106
2107/* Given 'xvlan', the VLANs of a packet during internal processing, and
2108 * 'out_xbundle', a bundle on which the packet is to be output, returns the
 2109 * VLANs that should be included in the output packet. */
2110static void
2111xvlan_output_translate(const struct xbundle *out_xbundle,
2112 const struct xvlan *xvlan, struct xvlan *out_xvlan)
9583bc14 2113{
46c88433 2114 switch (out_xbundle->vlan_mode) {
9583bc14 2115 case PORT_VLAN_ACCESS:
f0fb825a
EG
2116 memset(out_xvlan, 0, sizeof(*out_xvlan));
2117 break;
9583bc14
EJ
2118
2119 case PORT_VLAN_TRUNK:
2120 case PORT_VLAN_NATIVE_TAGGED:
f0fb825a
EG
2121 xvlan_copy(out_xvlan, xvlan);
2122 break;
9583bc14
EJ
2123
2124 case PORT_VLAN_NATIVE_UNTAGGED:
f0fb825a
EG
2125 xvlan_copy(out_xvlan, xvlan);
2126 if (xvlan->v[0].vid == out_xbundle->vlan) {
2127 xvlan_pop(out_xvlan);
2128 }
2129 break;
9583bc14 2130
fed8962a
EG
2131 case PORT_VLAN_DOT1Q_TUNNEL:
2132 xvlan_copy(out_xvlan, xvlan);
2133 xvlan_pop(out_xvlan);
2134 break;
2135
9583bc14 2136 default:
428b2edd 2137 OVS_NOT_REACHED();
9583bc14
EJ
2138 }
2139}
2140
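/* Worked examples of the two translations above (added note):
 *
 *   - Input on an access port with VLAN 10: an untagged packet becomes
 *     internal xvlan { v[0].vid = 10 }; output on a trunk re-tags it as
 *     VLAN 10, while output on the same access port strips all tags.
 *
 *   - Input on a dot1q-tunnel port with S-VLAN 100: the customer tags are
 *     shifted down one slot and an outer { qinq_ethtype, vid = 100 } tag
 *     is pushed; the matching output translation pops that outer tag
 *     again, restoring the customer tags. */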
fed8962a
EG
 2141/* If the output xbundle is a dot1q-tunnel, sets the cvlan mask bits. */
2142static void
2143check_and_set_cvlan_mask(struct flow_wildcards *wc,
2144 const struct xbundle *xbundle)
2145{
2146 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2147 wc->masks.vlans[1].tci = htons(0xffff);
2148 }
2149}
2150
9583bc14 2151static void
46c88433 2152output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
f0fb825a 2153 const struct xvlan *xvlan)
9583bc14 2154{
9583bc14 2155 uint16_t vid;
f0fb825a 2156 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
46c88433 2157 struct xport *xport;
e93ef1c7
JR
2158 struct xlate_bond_recirc xr;
2159 bool use_recirc = false;
f0fb825a 2160 struct xvlan out_xvlan;
9583bc14 2161
fed8962a
EG
2162 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2163
f0fb825a
EG
2164 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2165 if (out_xbundle->use_priority_tags) {
2166 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2167 VLAN_PCP_MASK;
2168 }
2169 vid = out_xvlan.v[0].vid;
417e7e66 2170 if (ovs_list_is_empty(&out_xbundle->xports)) {
46c88433
EJ
2171 /* Partially configured bundle with no slaves. Drop the packet. */
2172 return;
2173 } else if (!out_xbundle->bond) {
417e7e66 2174 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
46c88433 2175 bundle_node);
9583bc14 2176 } else {
84f0f298 2177 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
49a73e0c 2178 struct flow_wildcards *wc = ctx->wc;
84f0f298 2179 struct ofport_dpif *ofport;
adcf00ba 2180
a80aba3a
AZ
2181 if (ctx->xbridge->support.odp.recirc) {
2182 /* In case recirculation is not actually in use, 'xr.recirc_id'
 2183 * will be set to '0', since a valid 'recirc_id'
82f9f1f5
AZ
 2184 * cannot be zero. */
2185 bond_update_post_recirc_rules(out_xbundle->bond,
2186 &xr.recirc_id,
2187 &xr.hash_basis);
2188 if (xr.recirc_id) {
2189 /* Use recirculation instead of output. */
2190 use_recirc = true;
e93ef1c7 2191 xr.hash_alg = OVS_HASH_ALG_L4;
54ecb5a2
AZ
2192 /* Recirculation does not require unmasking hash fields. */
2193 wc = NULL;
adcf00ba
AZ
2194 }
2195 }
46c88433 2196
54ecb5a2
AZ
2197 ofport = bond_choose_output_slave(out_xbundle->bond,
2198 &ctx->xin->flow, wc, vid);
84f0f298 2199 xport = xport_lookup(xcfg, ofport);
46c88433
EJ
2200
2201 if (!xport) {
9583bc14
EJ
2202 /* No slaves enabled, so drop packet. */
2203 return;
2204 }
d6fc5f57 2205
e93ef1c7 2206 /* If use_recirc is set, the main thread will handle stats
b256dc52 2207 * accounting for this bond. */
e93ef1c7 2208 if (!use_recirc) {
b256dc52
JS
2209 if (ctx->xin->resubmit_stats) {
2210 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2211 ctx->xin->resubmit_stats->n_bytes);
2212 }
2213 if (ctx->xin->xcache) {
2214 struct xc_entry *entry;
2215 struct flow *flow;
2216
2217 flow = &ctx->xin->flow;
2218 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
901a517e
JR
2219 entry->bond.bond = bond_ref(out_xbundle->bond);
2220 entry->bond.flow = xmemdup(flow, sizeof *flow);
2221 entry->bond.vid = vid;
b256dc52 2222 }
d6fc5f57 2223 }
9583bc14
EJ
2224 }
2225
f0fb825a
EG
2226 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
2227 xvlan_put(&ctx->xin->flow, &out_xvlan);
9583bc14 2228
feee58b9
AZ
2229 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
2230 false);
f0fb825a 2231 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
9583bc14
EJ
2232}
2233
2234/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2235 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2236 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2237static bool
2238is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2239{
2240 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2241 return false;
2242 }
2243
2244 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2245 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2246 return false;
2247 }
2248
2249 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2250 if (flow->nw_proto == ARP_OP_REPLY) {
2251 return true;
2252 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2253 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2254 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2255
2256 return flow->nw_src == flow->nw_dst;
2257 } else {
2258 return false;
2259 }
2260}
2261
ff69c24a
FL
2262/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2263 * dropped. Returns true if they may be forwarded, false if they should be
2264 * dropped.
2265 *
2266 * 'in_port' must be the xport that corresponds to flow->in_port.
2267 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2268 *
2269 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2270 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
2271 * checked by input_vid_is_valid().
2272 *
 2273 * May also set flow wildcard fields in 'ctx->wc' as a side effect, e.g.
 2274 * via is_gratuitous_arp().
2275 */
2276static bool
2277is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2278 uint16_t vlan)
2279{
2280 struct xbundle *in_xbundle = in_port->xbundle;
2281 const struct xbridge *xbridge = ctx->xbridge;
2282 struct flow *flow = &ctx->xin->flow;
2283
2284 /* Drop frames for reserved multicast addresses
 2285 * only if the forward_bpdu option is disabled. */
2286 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2d9b49dd
BP
2287 xlate_report(ctx, OFT_DETAIL,
2288 "packet has reserved destination MAC, dropping");
ff69c24a
FL
2289 return false;
2290 }
2291
2292 if (in_xbundle->bond) {
2293 struct mac_entry *mac;
2294
2295 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2296 flow->dl_dst)) {
2297 case BV_ACCEPT:
2298 break;
2299
2300 case BV_DROP:
2d9b49dd
BP
2301 xlate_report(ctx, OFT_DETAIL,
2302 "bonding refused admissibility, dropping");
ff69c24a
FL
2303 return false;
2304
2305 case BV_DROP_IF_MOVED:
2306 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2307 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
9d078ec2
BP
2308 if (mac
2309 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
49a73e0c 2310 && (!is_gratuitous_arp(flow, ctx->wc)
9d078ec2 2311 || mac_entry_is_grat_arp_locked(mac))) {
ff69c24a 2312 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2d9b49dd
BP
2313 xlate_report(ctx, OFT_DETAIL,
2314 "SLB bond thinks this packet looped back, "
ff69c24a
FL
2315 "dropping");
2316 return false;
2317 }
2318 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2319 break;
2320 }
2321 }
2322
2323 return true;
2324}
2325
2d9b49dd
BP
2326static bool
2327update_learning_table__(const struct xbridge *xbridge,
2328 struct xbundle *in_xbundle, struct eth_addr dl_src,
2329 int vlan, bool is_grat_arp)
2330{
2331 return (in_xbundle == &ofpp_none_bundle
2332 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2333 is_grat_arp,
2334 in_xbundle->bond != NULL,
2335 in_xbundle->ofbundle));
2336}
2337
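/* Added note: update_learning_table__() above returns true when there is
 * nothing to record -- learning does not apply to the OFPP_NONE bundle,
 * or mac_learning_update() reports that the table was left unchanged --
 * and false when a new association was actually learned.
 * update_learning_table() below depends on this inverted sense: it logs
 * "learned that ..." only on a false return. */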
ee047520 2338static void
2d9b49dd 2339update_learning_table(const struct xlate_ctx *ctx,
064799a1
JR
2340 struct xbundle *in_xbundle, struct eth_addr dl_src,
2341 int vlan, bool is_grat_arp)
ee047520 2342{
2d9b49dd
BP
2343 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2344 is_grat_arp)) {
2345 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2346 "on port %s in VLAN %d",
2347 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
ee047520 2348 }
9583bc14
EJ
2349}
2350
86e2dcdd
FL
2351/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2352 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2353static void
2d9b49dd 2354update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
06994f87
TLSC
2355 const struct flow *flow,
2356 struct mcast_snooping *ms, int vlan,
2357 struct xbundle *in_xbundle,
2358 const struct dp_packet *packet)
86e2dcdd
FL
2359 OVS_REQ_WRLOCK(ms->rwlock)
2360{
46445c63 2361 const struct igmp_header *igmp;
e3102e42 2362 int count;
46445c63 2363 size_t offset;
06994f87 2364 ovs_be32 ip4 = flow->igmp_group_ip4;
86e2dcdd 2365
46445c63
EC
2366 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2367 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2368 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2369 xlate_report_debug(ctx, OFT_DETAIL,
2370 "multicast snooping received bad IGMP "
2371 "checksum on port %s in VLAN %d",
2372 in_xbundle->name, vlan);
46445c63
EC
2373 return;
2374 }
2375
86e2dcdd
FL
2376 switch (ntohs(flow->tp_src)) {
2377 case IGMP_HOST_MEMBERSHIP_REPORT:
2378 case IGMPV2_HOST_MEMBERSHIP_REPORT:
964a4d5f 2379 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2380 xlate_report_debug(ctx, OFT_DETAIL,
2381 "multicast snooping learned that "
2382 IP_FMT" is on port %s in VLAN %d",
2383 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2384 }
2385 break;
2386 case IGMP_HOST_LEAVE_MESSAGE:
964a4d5f 2387 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2388 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
2389 IP_FMT" is on port %s in VLAN %d",
2390 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2391 }
2392 break;
2393 case IGMP_HOST_MEMBERSHIP_QUERY:
2394 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2d9b49dd
BP
2395 in_xbundle->ofbundle)) {
2396 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2397 "from "IP_FMT" is on port %s in VLAN %d",
2398 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
86e2dcdd
FL
2399 }
2400 break;
e3102e42 2401 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2d9b49dd
BP
2402 count = mcast_snooping_add_report(ms, packet, vlan,
2403 in_xbundle->ofbundle);
2404 if (count) {
2405 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2406 "%d addresses on port %s in VLAN %d",
2407 count, in_xbundle->name, vlan);
e3102e42
TLSC
2408 }
2409 break;
86e2dcdd
FL
2410 }
2411}
2412
06994f87 2413static void
2d9b49dd 2414update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
06994f87
TLSC
2415 const struct flow *flow,
2416 struct mcast_snooping *ms, int vlan,
2417 struct xbundle *in_xbundle,
2418 const struct dp_packet *packet)
2419 OVS_REQ_WRLOCK(ms->rwlock)
2420{
46445c63 2421 const struct mld_header *mld;
06994f87 2422 int count;
46445c63
EC
2423 size_t offset;
2424
2425 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2426 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2427
2428 if (!mld ||
2429 packet_csum_upperlayer6(dp_packet_l3(packet),
2430 mld, IPPROTO_ICMPV6,
2431 dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2432 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2433 "bad MLD checksum on port %s in VLAN %d",
2434 in_xbundle->name, vlan);
46445c63
EC
2435 return;
2436 }
06994f87
TLSC
2437
2438 switch (ntohs(flow->tp_src)) {
2439 case MLD_QUERY:
2440 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2441 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2442 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2443 "port %s in VLAN %d", in_xbundle->name, vlan);
06994f87
TLSC
2444 }
2445 break;
2446 case MLD_REPORT:
2447 case MLD_DONE:
2448 case MLD2_REPORT:
2449 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2450 if (count) {
2d9b49dd
BP
2451 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2452 "%d addresses on port %s in VLAN %d",
2453 count, in_xbundle->name, vlan);
06994f87
TLSC
2454 }
2455 break;
2456 }
2457}
2458
86e2dcdd
FL
2459/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2460 * was received on 'in_xbundle' in 'vlan'. */
2461static void
2d9b49dd 2462update_mcast_snooping_table(const struct xlate_ctx *ctx,
86e2dcdd 2463 const struct flow *flow, int vlan,
e3102e42
TLSC
2464 struct xbundle *in_xbundle,
2465 const struct dp_packet *packet)
86e2dcdd 2466{
2d9b49dd 2467 struct mcast_snooping *ms = ctx->xbridge->ms;
86e2dcdd
FL
2468 struct xlate_cfg *xcfg;
2469 struct xbundle *mcast_xbundle;
f4ae6e23 2470 struct mcast_port_bundle *fport;
86e2dcdd
FL
2471
2472 /* Don't learn the OFPP_NONE port. */
2473 if (in_xbundle == &ofpp_none_bundle) {
2474 return;
2475 }
2476
2477 /* Don't learn from flood ports */
2478 mcast_xbundle = NULL;
2479 ovs_rwlock_wrlock(&ms->rwlock);
2480 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 2481 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
2482 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2483 if (mcast_xbundle == in_xbundle) {
2484 break;
2485 }
2486 }
2487
2488 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
06994f87 2489 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2d9b49dd 2490 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
06994f87
TLSC
2491 in_xbundle, packet);
2492 } else {
2d9b49dd 2493 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
06994f87
TLSC
2494 in_xbundle, packet);
2495 }
86e2dcdd
FL
2496 }
2497 ovs_rwlock_unlock(&ms->rwlock);
2498}
2499
 2500/* Sends the packet to ports that have learned the multicast group. */
2501static void
2502xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2503 struct mcast_snooping *ms OVS_UNUSED,
2504 struct mcast_group *grp,
f0fb825a
EG
2505 struct xbundle *in_xbundle,
2506 const struct xvlan *xvlan)
86e2dcdd
FL
2507 OVS_REQ_RDLOCK(ms->rwlock)
2508{
2509 struct xlate_cfg *xcfg;
2510 struct mcast_group_bundle *b;
2511 struct xbundle *mcast_xbundle;
2512
2513 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2514 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2515 mcast_xbundle = xbundle_lookup(xcfg, b->port);
2516 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2517 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
f0fb825a 2518 output_normal(ctx, mcast_xbundle, xvlan);
86e2dcdd 2519 } else if (!mcast_xbundle) {
2d9b49dd
BP
2520 xlate_report(ctx, OFT_WARN,
2521 "mcast group port is unknown, dropping");
86e2dcdd 2522 } else {
2d9b49dd
BP
2523 xlate_report(ctx, OFT_DETAIL,
2524 "mcast group port is input port, dropping");
86e2dcdd
FL
2525 }
2526 }
2527}
2528
 2529/* Sends the packet to ports connected to multicast routers. */
2530static void
2531xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2532 struct mcast_snooping *ms,
f0fb825a
EG
2533 struct xbundle *in_xbundle,
2534 const struct xvlan *xvlan)
86e2dcdd
FL
2535 OVS_REQ_RDLOCK(ms->rwlock)
2536{
2537 struct xlate_cfg *xcfg;
2538 struct mcast_mrouter_bundle *mrouter;
2539 struct xbundle *mcast_xbundle;
2540
2541 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2542 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2543 mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
94a881c1 2544 if (mcast_xbundle && mcast_xbundle != in_xbundle
f0fb825a 2545 && mrouter->vlan == xvlan->v[0].vid) {
2d9b49dd 2546 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
f0fb825a 2547 output_normal(ctx, mcast_xbundle, xvlan);
86e2dcdd 2548 } else if (!mcast_xbundle) {
2d9b49dd
BP
2549 xlate_report(ctx, OFT_WARN,
2550 "mcast router port is unknown, dropping");
f0fb825a 2551 } else if (mrouter->vlan != xvlan->v[0].vid) {
2d9b49dd
BP
2552 xlate_report(ctx, OFT_DETAIL,
2553 "mcast router is on another vlan, dropping");
86e2dcdd 2554 } else {
2d9b49dd
BP
2555 xlate_report(ctx, OFT_DETAIL,
2556 "mcast router port is input port, dropping");
86e2dcdd
FL
2557 }
2558 }
2559}
2560
 2561/* Sends the packet to ports flagged for flooding. */
2562static void
2563xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2564 struct mcast_snooping *ms,
f0fb825a
EG
2565 struct xbundle *in_xbundle,
2566 const struct xvlan *xvlan)
86e2dcdd
FL
2567 OVS_REQ_RDLOCK(ms->rwlock)
2568{
2569 struct xlate_cfg *xcfg;
f4ae6e23 2570 struct mcast_port_bundle *fport;
86e2dcdd
FL
2571 struct xbundle *mcast_xbundle;
2572
2573 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 2574 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
2575 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2576 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2577 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
f0fb825a 2578 output_normal(ctx, mcast_xbundle, xvlan);
86e2dcdd 2579 } else if (!mcast_xbundle) {
2d9b49dd
BP
2580 xlate_report(ctx, OFT_WARN,
2581 "mcast flood port is unknown, dropping");
86e2dcdd 2582 } else {
2d9b49dd
BP
2583 xlate_report(ctx, OFT_DETAIL,
2584 "mcast flood port is input port, dropping");
86e2dcdd
FL
2585 }
2586 }
2587}
2588
8e04a33f
FL
 2589/* Forwards the Reports to configured ports. */
2590static void
2591xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2592 struct mcast_snooping *ms,
f0fb825a
EG
2593 struct xbundle *in_xbundle,
2594 const struct xvlan *xvlan)
8e04a33f
FL
2595 OVS_REQ_RDLOCK(ms->rwlock)
2596{
2597 struct xlate_cfg *xcfg;
2598 struct mcast_port_bundle *rport;
2599 struct xbundle *mcast_xbundle;
2600
2601 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2602 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2603 mcast_xbundle = xbundle_lookup(xcfg, rport->port);
2604 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd
BP
2605 xlate_report(ctx, OFT_DETAIL,
2606 "forwarding report to mcast flagged port");
f0fb825a 2607 output_normal(ctx, mcast_xbundle, xvlan);
8e04a33f 2608 } else if (!mcast_xbundle) {
2d9b49dd
BP
2609 xlate_report(ctx, OFT_WARN,
2610 "mcast port is unknown, dropping the report");
8e04a33f 2611 } else {
2d9b49dd
BP
2612 xlate_report(ctx, OFT_DETAIL,
2613 "mcast port is input port, dropping the Report");
8e04a33f
FL
2614 }
2615 }
2616}
2617
682800a4
FL
2618static void
2619xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
f0fb825a 2620 struct xvlan *xvlan)
682800a4
FL
2621{
2622 struct xbundle *xbundle;
2623
2624 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2625 if (xbundle != in_xbundle
f0fb825a 2626 && xbundle_includes_vlan(xbundle, xvlan)
682800a4
FL
2627 && xbundle->floodable
2628 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
f0fb825a 2629 output_normal(ctx, xbundle, xvlan);
682800a4
FL
2630 }
2631 }
2031ef97 2632 ctx->nf_output_iface = NF_OUT_FLOOD;
682800a4
FL
2633}
2634
a75636c8
BP
2635static bool
2636is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2637{
2638 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2639 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2640 return ip_is_local_multicast(flow->nw_dst);
2641 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2642 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2643 return ipv6_is_all_hosts(&flow->ipv6_dst);
2644 } else {
2645 return false;
2646 }
2647}
2648
9583bc14
EJ
2649static void
2650xlate_normal(struct xlate_ctx *ctx)
2651{
49a73e0c 2652 struct flow_wildcards *wc = ctx->wc;
33bf9176 2653 struct flow *flow = &ctx->xin->flow;
46c88433
EJ
2654 struct xbundle *in_xbundle;
2655 struct xport *in_port;
9583bc14 2656 struct mac_entry *mac;
d6d5bbc9 2657 void *mac_port;
f0fb825a
EG
2658 struct xvlan in_xvlan;
2659 struct xvlan xvlan;
9583bc14 2660 uint16_t vlan;
9583bc14 2661
33bf9176
BP
2662 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2663 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
f0fb825a 2664 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
9583bc14 2665
2d9b49dd 2666 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
46c88433 2667 if (!in_xbundle) {
2d9b49dd 2668 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
9583bc14
EJ
2669 return;
2670 }
2671
2672 /* Drop malformed frames. */
f0fb825a
EG
2673 if (eth_type_vlan(flow->dl_type) &&
2674 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
9583bc14 2675 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2676 xlate_report_error(ctx, "dropping packet with partial "
2677 "VLAN tag received on port %s",
2678 in_xbundle->name);
9583bc14 2679 }
2d9b49dd 2680 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
9583bc14
EJ
2681 return;
2682 }
2683
2684 /* Drop frames on bundles reserved for mirroring. */
46c88433 2685 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
9583bc14 2686 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2687 xlate_report_error(ctx, "dropping packet received on port %s, "
2688 "which is reserved exclusively for mirroring",
2689 in_xbundle->name);
9583bc14 2690 }
2d9b49dd
BP
2691 xlate_report(ctx, OFT_WARN,
2692 "input port is mirror output port, dropping");
9583bc14
EJ
2693 return;
2694 }
2695
2696 /* Check VLAN. */
f0fb825a
EG
2697 xvlan_extract(flow, &in_xvlan);
2698 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
2d9b49dd
BP
2699 xlate_report(ctx, OFT_WARN,
2700 "disallowed VLAN VID for this input port, dropping");
9583bc14
EJ
2701 return;
2702 }
f0fb825a
EG
2703 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
2704 vlan = xvlan.v[0].vid;
9583bc14
EJ
2705
2706 /* Check other admissibility requirements. */
2707 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2708 return;
2709 }
2710
2711 /* Learn source MAC. */
064799a1 2712 bool is_grat_arp = is_gratuitous_arp(flow, wc);
875ab130
BP
2713 if (ctx->xin->allow_side_effects
2714 && flow->packet_type == htonl(PT_ETH)
2715 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
2716 ) {
2d9b49dd 2717 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
064799a1 2718 is_grat_arp);
9583bc14 2719 }
064799a1 2720 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
b256dc52
JS
2721 struct xc_entry *entry;
2722
064799a1 2723 /* Save just enough info to update mac learning table later. */
b256dc52 2724 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
901a517e
JR
2725 entry->normal.ofproto = ctx->xbridge->ofproto;
2726 entry->normal.in_port = flow->in_port.ofp_port;
2727 entry->normal.dl_src = flow->dl_src;
2728 entry->normal.vlan = vlan;
2729 entry->normal.is_gratuitous_arp = is_grat_arp;
b256dc52 2730 }
9583bc14
EJ
2731
2732 /* Determine output bundle. */
86e2dcdd
FL
2733 if (mcast_snooping_enabled(ctx->xbridge->ms)
2734 && !eth_addr_is_broadcast(flow->dl_dst)
2735 && eth_addr_is_multicast(flow->dl_dst)
06994f87 2736 && is_ip_any(flow)) {
86e2dcdd 2737 struct mcast_snooping *ms = ctx->xbridge->ms;
06994f87 2738 struct mcast_group *grp = NULL;
86e2dcdd 2739
a75636c8 2740 if (is_igmp(flow, wc)) {
1bc24169
BP
2741 /*
 2742 * IGMP packets need to take the slow path so that they are
 2743 * processed for mdb updates. Otherwise, group entries could
 2744 * expire even though hosts are still sending reports.
2745 */
2746 ctx->xout->slow |= SLOW_ACTION;
2747
a75636c8 2748 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
d29f137b
TLSC
2749 if (mcast_snooping_is_membership(flow->tp_src) ||
2750 mcast_snooping_is_query(flow->tp_src)) {
df70a773 2751 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 2752 update_mcast_snooping_table(ctx, flow, vlan,
e3102e42 2753 in_xbundle, ctx->xin->packet);
d29f137b 2754 }
86e2dcdd 2755 }
d6d5bbc9 2756
86e2dcdd
FL
2757 if (mcast_snooping_is_membership(flow->tp_src)) {
2758 ovs_rwlock_rdlock(&ms->rwlock);
f0fb825a 2759 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
8e04a33f
FL
2760 /* RFC4541: section 2.1.1, item 1: A snooping switch should
2761 * forward IGMP Membership Reports only to those ports where
2762 * multicast routers are attached. Alternatively stated: a
2763 * snooping switch should not forward IGMP Membership Reports
2764 * to ports on which only hosts are attached.
2765 * An administrative control may be provided to override this
2766 * restriction, allowing the report messages to be flooded to
2767 * other ports. */
f0fb825a 2768 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
86e2dcdd
FL
2769 ovs_rwlock_unlock(&ms->rwlock);
2770 } else {
2d9b49dd 2771 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
f0fb825a 2772 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd
FL
2773 }
2774 return;
a75636c8 2775 } else if (is_mld(flow, wc)) {
06994f87 2776 ctx->xout->slow |= SLOW_ACTION;
df70a773 2777 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 2778 update_mcast_snooping_table(ctx, flow, vlan,
06994f87
TLSC
2779 in_xbundle, ctx->xin->packet);
2780 }
a75636c8 2781 if (is_mld_report(flow, wc)) {
06994f87 2782 ovs_rwlock_rdlock(&ms->rwlock);
f0fb825a
EG
2783 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2784 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
06994f87
TLSC
2785 ovs_rwlock_unlock(&ms->rwlock);
2786 } else {
2d9b49dd 2787 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
f0fb825a 2788 xlate_normal_flood(ctx, in_xbundle, &xvlan);
06994f87 2789 }
86e2dcdd 2790 } else {
a75636c8 2791 if (is_ip_local_multicast(flow, wc)) {
86e2dcdd
FL
2792 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
2793 * address in the 224.0.0.x range which are not IGMP must
2794 * be forwarded on all ports */
2d9b49dd
BP
2795 xlate_report(ctx, OFT_DETAIL,
2796 "RFC4541: section 2.1.2, item 2, flooding");
f0fb825a 2797 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd
FL
2798 return;
2799 }
2800 }
2801
2802 /* forwarding to group base ports */
2803 ovs_rwlock_rdlock(&ms->rwlock);
06994f87
TLSC
2804 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2805 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
2806 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2807 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
2808 }
86e2dcdd 2809 if (grp) {
f0fb825a
EG
2810 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &xvlan);
2811 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
2812 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
9583bc14 2813 } else {
86e2dcdd 2814 if (mcast_snooping_flood_unreg(ms)) {
2d9b49dd
BP
2815 xlate_report(ctx, OFT_DETAIL,
2816 "unregistered multicast, flooding");
f0fb825a 2817 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd 2818 } else {
f0fb825a
EG
2819 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2820 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
86e2dcdd 2821 }
9583bc14 2822 }
86e2dcdd 2823 ovs_rwlock_unlock(&ms->rwlock);
9583bc14 2824 } else {
86e2dcdd
FL
2825 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
2826 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
9d078ec2 2827 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
86e2dcdd
FL
2828 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
2829
2830 if (mac_port) {
2831 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2832 struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
2833 if (mac_xbundle && mac_xbundle != in_xbundle) {
2d9b49dd 2834 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
f0fb825a 2835 output_normal(ctx, mac_xbundle, &xvlan);
86e2dcdd 2836 } else if (!mac_xbundle) {
2d9b49dd
BP
2837 xlate_report(ctx, OFT_WARN,
2838 "learned port is unknown, dropping");
86e2dcdd 2839 } else {
2d9b49dd
BP
2840 xlate_report(ctx, OFT_DETAIL,
2841 "learned port is input port, dropping");
86e2dcdd
FL
2842 }
2843 } else {
2d9b49dd
BP
2844 xlate_report(ctx, OFT_DETAIL,
2845 "no learned MAC for destination, flooding");
f0fb825a 2846 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd 2847 }
9583bc14
EJ
2848 }
2849}
2850
a6092018
BP
2851/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
2852 * 'probability' is the number of packets out of UINT32_MAX to sample. The
2853 * 'cookie' (of length 'cookie_size' bytes) is passed back in the callback for
2854 * each sampled packet. 'tunnel_out_port', if not ODPP_NONE, is added as the
2855 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions', an
f69f713b
BY
 2856 * OVS_USERSPACE_ATTR_ACTIONS attribute is added. When egress tunnel
 2857 * information is sampled, sample(sampling_port=1) translates into the
 2858 * datapath actions set(tunnel(...)), sample(...), so that the tunnel
 2859 * metadata is in place before the sample action runs.
9583bc14
EJ
2860 */
2861static size_t
a6092018 2862compose_sample_action(struct xlate_ctx *ctx,
9583bc14
EJ
2863 const uint32_t probability,
2864 const union user_action_cookie *cookie,
8b7ea2d4 2865 const size_t cookie_size,
7321bda3
NM
2866 const odp_port_t tunnel_out_port,
2867 bool include_actions)
9583bc14 2868{
b97f2c3a
BY
2869 if (probability == 0) {
2870 /* No need to generate sampling or the inner action. */
2871 return 0;
2872 }
2873
31b29c2e
AZ
2874 /* If the slow path meter is configured by the controller,
2875 * insert a meter action before the user space action. */
2876 struct ofproto *ofproto = &ctx->xin->ofproto->up;
2877 uint32_t meter_id = ofproto->slowpath_meter_id;
2878
 2879 /* When a meter action is not required, avoid generating a sample
 2880 * action for a 100% sampling rate. */
2881 bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
72471622
BY
2882 size_t sample_offset, actions_offset;
2883 if (is_sample) {
2884 sample_offset = nl_msg_start_nested(ctx->odp_actions,
2885 OVS_ACTION_ATTR_SAMPLE);
2886 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
2887 probability);
2888 actions_offset = nl_msg_start_nested(ctx->odp_actions,
2889 OVS_SAMPLE_ATTR_ACTIONS);
2890 }
9583bc14 2891
31b29c2e
AZ
2892 if (meter_id != UINT32_MAX) {
2893 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
2894 }
2895
a6092018
BP
2896 odp_port_t odp_port = ofp_port_to_odp_port(
2897 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
2898 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
2899 flow_hash_5tuple(&ctx->xin->flow, 0));
2900 int cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
2901 tunnel_out_port,
2902 include_actions,
2903 ctx->odp_actions);
89a8a7f0 2904
72471622
BY
2905 if (is_sample) {
2906 nl_msg_end_nested(ctx->odp_actions, actions_offset);
2907 nl_msg_end_nested(ctx->odp_actions, sample_offset);
2908 }
9583bc14 2909
9583bc14
EJ
2910 return cookie_offset;
2911}
2912
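/* Minimal sketch of the nesting pattern used above (added; assumes the
 * OVS netlink helpers from lib/netlink.h and the generated odp-netlink.h
 * attribute enums; 'buf' and the omitted inner actions are hypothetical):
 * the result is a datapath action of the form
 * sample(sample=50%,actions(...)). */
#include <stdint.h>
#include "openvswitch/ofpbuf.h"
#include "netlink.h"
#include "odp-netlink.h"

static void
toy_compose_half_rate_sample(struct ofpbuf *buf)
{
    size_t sample_ofs = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SAMPLE);
    nl_msg_put_u32(buf, OVS_SAMPLE_ATTR_PROBABILITY, UINT32_MAX / 2);

    size_t actions_ofs = nl_msg_start_nested(buf, OVS_SAMPLE_ATTR_ACTIONS);
    /* ... inner actions (userspace, meter, ...) would be emitted here ... */
    nl_msg_end_nested(buf, actions_ofs);

    nl_msg_end_nested(buf, sample_ofs);
}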
a6092018
BP
2913/* If sFLow is not enabled, returns 0 without doing anything.
2914 *
2915 * If sFlow is enabled, appends a template "sample" action to the ODP actions
2916 * in 'ctx'. This action is a template because some of the information needed
2917 * to fill it out is not available until flow translation is complete. In this
 2918 * case, this function returns an offset, which is always nonzero, to pass
2919 * later to fix_sflow_action() to fill in the rest of the template. */
9583bc14 2920static size_t
a6092018 2921compose_sflow_action(struct xlate_ctx *ctx)
9583bc14 2922{
a6092018
BP
2923 struct dpif_sflow *sflow = ctx->xbridge->sflow;
2924 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2925 return 0;
2926 }
2927
a6092018
BP
2928 union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
2929 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
7321bda3
NM
2930 &cookie, sizeof cookie.sflow, ODPP_NONE,
2931 true);
9583bc14
EJ
2932}
2933
f69f713b
BY
2934/* If flow IPFIX is enabled, make sure IPFIX flow sample action
2935 * at egress point of tunnel port is just in front of corresponding
2936 * output action. If bridge IPFIX is enabled, this appends an IPFIX
2937 * sample action to 'ctx->odp_actions'. */
9583bc14 2938static void
a6092018 2939compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
9583bc14 2940{
a6092018 2941 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
8b7ea2d4 2942 odp_port_t tunnel_out_port = ODPP_NONE;
9583bc14 2943
a6092018 2944 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2945 return;
2946 }
2947
8b7ea2d4
WZ
2948 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
2949 * number. */
2950 if (output_odp_port == ODPP_NONE &&
a6092018 2951 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
8b7ea2d4
WZ
2952 return;
2953 }
2954
f69f713b 2955 /* For output case, output_odp_port is valid. */
8b7ea2d4 2956 if (output_odp_port != ODPP_NONE) {
a6092018 2957 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
8b7ea2d4
WZ
2958 return;
2959 }
2960 /* If tunnel sampling is enabled, put an additional option attribute:
2961 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
2962 */
a6092018
BP
2963 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
2964 dpif_ipfix_get_tunnel_port(ipfix, output_odp_port) ) {
8b7ea2d4
WZ
2965 tunnel_out_port = output_odp_port;
2966 }
2967 }
2968
a6092018
BP
2969 union user_action_cookie cookie = {
2970 .ipfix = {
2971 .type = USER_ACTION_COOKIE_IPFIX,
2972 .output_odp_port = output_odp_port,
2973 }
2974 };
2975 compose_sample_action(ctx,
2976 dpif_ipfix_get_bridge_exporter_probability(ipfix),
7321bda3
NM
2977 &cookie, sizeof cookie.ipfix, tunnel_out_port,
2978 false);
9583bc14
EJ
2979}
2980
a6092018
BP
2981/* Fix "sample" action according to data collected while composing ODP actions,
2982 * as described in compose_sflow_action().
2983 *
 2984 * 'user_cookie_offset' must be the offset returned by compose_sflow_action(). */
9583bc14 2985static void
a6092018 2986fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
9583bc14
EJ
2987{
2988 const struct flow *base = &ctx->base_flow;
2989 union user_action_cookie *cookie;
2990
a6092018 2991 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset,
9583bc14
EJ
2992 sizeof cookie->sflow);
2993 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
2994
a6092018 2995 cookie->type = USER_ACTION_COOKIE_SFLOW;
f0fb825a 2996 cookie->sflow.vlan_tci = base->vlans[0].tci;
a6092018
BP
2997
2998 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
2999 * port information") for the interpretation of cookie->output. */
3000 switch (ctx->sflow_n_outputs) {
3001 case 0:
3002 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3003 cookie->sflow.output = 0x40000000 | 256;
3004 break;
3005
3006 case 1:
3007 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3008 ctx->xbridge->sflow, ctx->sflow_odp_port);
3009 if (cookie->sflow.output) {
3010 break;
3011 }
3012 /* Fall through. */
3013 default:
 3014 /* 0x80000000 means "multiple output ports". */
3015 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3016 break;
3017 }
9583bc14
EJ
3018}
3019
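/* Added sketch of the sFlow "output" encoding chosen above (per the sFlow
 * v5 spec's "Input/output port information"); the helper below is
 * hypothetical but mirrors the switch in fix_sflow_action(): */
#include <stdint.h>

static uint32_t
toy_sflow_output(uint32_t n_outputs, uint32_t single_ifindex)
{
    if (n_outputs == 0) {
        return 0x40000000 | 256;        /* dropped for unknown reason */
    } else if (n_outputs == 1 && single_ifindex) {
        return single_ifindex;          /* the one output interface */
    } else {
        return 0x80000000 | n_outputs;  /* multiple output ports */
    }
}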
static bool
process_special(struct xlate_ctx *ctx, const struct xport *xport)
{
    const struct flow *flow = &ctx->xin->flow;
    struct flow_wildcards *wc = ctx->wc;
    const struct xbridge *xbridge = ctx->xbridge;
    const struct dp_packet *packet = ctx->xin->packet;
    enum slow_path_reason slow;

    if (!xport) {
        slow = 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        slow = SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
            /* If a POLL was received, immediately send a FINAL back. */
            if (bfd_should_send_packet(xport->bfd)) {
                ofproto_dpif_monitor_port_send_soon(xport->ofport);
            }
        }
        slow = SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        slow = SLOW_LACP;
    } else if ((xbridge->stp || xbridge->rstp) &&
               stp_should_process_flow(flow, wc)) {
        if (packet) {
            xbridge->stp
                ? stp_process_packet(xport, packet)
                : rstp_process_packet(xport, packet);
        }
        slow = SLOW_STP;
    } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
        if (packet) {
            lldp_process_packet(xport->lldp, packet);
        }
        slow = SLOW_LLDP;
    } else {
        slow = 0;
    }

    if (slow) {
        ctx->xout->slow |= slow;
        return true;
    } else {
        return false;
    }
}

static int
tnl_route_lookup_flow(const struct flow *oflow,
                      struct in6_addr *ip, struct in6_addr *src,
                      struct xport **out_port)
{
    char out_dev[IFNAMSIZ];
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;
    struct in6_addr gw;
    struct in6_addr dst;

    dst = flow_tnl_dst(&oflow->tunnel);
    if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
        return -ENOENT;
    }

    if (ipv6_addr_is_set(&gw) &&
        (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
        *ip = gw;
    } else {
        *ip = dst;
    }

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    ovs_assert(xcfg);

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
            struct xport *port;

            HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
                if (!strncmp(netdev_get_name(port->netdev),
                             out_dev, IFNAMSIZ)) {
                    *out_port = port;
                    return 0;
                }
            }
        }
    }
    return -ENOENT;
}

static int
compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
                    struct dp_packet *packet)
{
    struct xbridge *xbridge = out_dev->xbridge;
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    flow_extract(packet, &flow);
    flow.in_port.ofp_port = out_dev->ofp_port;
    output.port = OFPP_TABLE;
    output.max_len = 0;

    return ofproto_dpif_execute_actions__(xbridge->ofproto,
                                          ctx->xin->tables_version, &flow,
                                          NULL, &output.ofpact, sizeof output,
                                          ctx->depth, ctx->resubmits, packet);
}

static void
tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
                    const struct eth_addr eth_src,
                    struct in6_addr *ipv6_src, struct in6_addr *ipv6_dst)
{
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
    compose_table_xlate(ctx, out_dev, &packet);
    dp_packet_uninit(&packet);
}

static void
tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
                     const struct eth_addr eth_src,
                     ovs_be32 ip_src, ovs_be32 ip_dst)
{
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_arp(&packet, ARP_OP_REQUEST,
                eth_src, eth_addr_zero, true, ip_src, ip_dst);

    compose_table_xlate(ctx, out_dev, &packet);
    dp_packet_uninit(&packet);
}

static void
propagate_tunnel_data_to_flow__(struct flow *dst_flow,
                                const struct flow *src_flow,
                                struct eth_addr dmac, struct eth_addr smac,
                                struct in6_addr s_ip6, ovs_be32 s_ip,
                                bool is_tnl_ipv6, uint8_t nw_proto)
{
    dst_flow->dl_dst = dmac;
    dst_flow->dl_src = smac;

    dst_flow->packet_type = htonl(PT_ETH);
    dst_flow->nw_dst = src_flow->tunnel.ip_dst;
    dst_flow->nw_src = src_flow->tunnel.ip_src;
    dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
    dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;

    dst_flow->nw_tos = src_flow->tunnel.ip_tos;
    dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
    dst_flow->tp_dst = src_flow->tunnel.tp_dst;
    dst_flow->tp_src = src_flow->tunnel.tp_src;

    if (is_tnl_ipv6) {
        dst_flow->dl_type = htons(ETH_TYPE_IPV6);
        if (ipv6_mask_is_any(&dst_flow->ipv6_src)
            && !ipv6_mask_is_any(&s_ip6)) {
            dst_flow->ipv6_src = s_ip6;
        }
    } else {
        dst_flow->dl_type = htons(ETH_TYPE_IP);
        if (dst_flow->nw_src == 0 && s_ip) {
            dst_flow->nw_src = s_ip;
        }
    }
    dst_flow->nw_proto = nw_proto;
}

/*
 * Populate the 'flow' and 'base_flow' L3 fields to do the post tunnel push
 * translations.
 */
static void
propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
                              struct eth_addr smac, struct in6_addr s_ip6,
                              ovs_be32 s_ip, bool is_tnl_ipv6,
                              enum ovs_vport_type tnl_type)
{
    struct flow *base_flow, *flow;
    flow = &ctx->xin->flow;
    base_flow = &ctx->base_flow;
    uint8_t nw_proto = 0;

    switch (tnl_type) {
    case OVS_VPORT_TYPE_GRE:
        nw_proto = IPPROTO_GRE;
        break;
    case OVS_VPORT_TYPE_VXLAN:
    case OVS_VPORT_TYPE_GENEVE:
        nw_proto = IPPROTO_UDP;
        break;
    case OVS_VPORT_TYPE_LISP:
    case OVS_VPORT_TYPE_STT:
    case OVS_VPORT_TYPE_UNSPEC:
    case OVS_VPORT_TYPE_NETDEV:
    case OVS_VPORT_TYPE_INTERNAL:
    case __OVS_VPORT_TYPE_MAX:
    default:
        OVS_NOT_REACHED();
        break;
    }
    /*
     * Update 'base_flow' first and 'flow' second, because the helper
     * modifies its destination flow and 'flow' also serves as the source
     * flow in both calls.
     */
    propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
                                    is_tnl_ipv6, nw_proto);
    propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
                                    is_tnl_ipv6, nw_proto);
}

/* Validate if the translated combined actions are OK to proceed.
 * If the actions contain a TRUNC action, it is not allowed to do the
 * tunnel_push combine, as stats cannot be updated correctly in that case.
 */
static bool
is_tunnel_actions_clone_ready(struct xlate_ctx *ctx)
{
    struct nlattr *tnl_actions;
    const struct nlattr *a;
    unsigned int left;
    size_t actions_len;
    struct ofpbuf *actions = ctx->odp_actions;

    if (!actions) {
        /* No actions, no harm in doing combine. */
        return true;
    }

    /* Cannot perform tunnel push on slow path action CONTROLLER_OUTPUT. */
    if (ctx->xout->slow & SLOW_CONTROLLER) {
        return false;
    }
    actions_len = actions->size;

    tnl_actions = (struct nlattr *) actions->data;
    NL_ATTR_FOR_EACH_UNSAFE (a, left, tnl_actions, actions_len) {
        int type = nl_attr_type(a);
        if (type == OVS_ACTION_ATTR_TRUNC) {
            VLOG_DBG("Cannot do tunnel action-combine on trunc action");
            return false;
        }
    }
    return true;
}
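
/* Illustrative sketch (not part of the original sources): the same
 * NL_ATTR_FOR_EACH_UNSAFE scan as above, generalized to search a flat odp
 * action list for an arbitrary action type.  The helper name is
 * hypothetical. */
#if 0
static bool
odp_actions_contain_type_example(const struct ofpbuf *actions, int wanted)
{
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, actions->data, actions->size) {
        if (nl_attr_type(a) == wanted) {
            return true;
        }
    }
    return false;
}
#endif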

static bool
validate_and_combine_post_tnl_actions(struct xlate_ctx *ctx,
                                      const struct xport *xport,
                                      struct xport *out_dev,
                                      struct ovs_action_push_tnl tnl_push_data)
{
    const struct dpif_flow_stats *backup_resubmit_stats;
    struct xlate_cache *backup_xcache;
    bool nested_act_flag = false;
    struct flow_wildcards tmp_flow_wc;
    struct flow_wildcards *backup_flow_wc_ptr;
    bool backup_side_effects;
    const struct dp_packet *backup_pkt;

    memset(&tmp_flow_wc, 0, sizeof tmp_flow_wc);
    backup_flow_wc_ptr = ctx->wc;
    ctx->wc = &tmp_flow_wc;
    ctx->xin->wc = NULL;
    backup_resubmit_stats = ctx->xin->resubmit_stats;
    backup_xcache = ctx->xin->xcache;
    backup_side_effects = ctx->xin->allow_side_effects;
    backup_pkt = ctx->xin->packet;

    size_t push_action_size = 0;
    size_t clone_ofs = nl_msg_start_nested(ctx->odp_actions,
                                           OVS_ACTION_ATTR_CLONE);
    odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
    push_action_size = ctx->odp_actions->size;

    ctx->xin->resubmit_stats = NULL;
    ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
    ctx->xin->allow_side_effects = false;
    ctx->xin->packet = NULL;

    /* Push the cache entry for the tunnel first. */
    struct xc_entry *entry;
    entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
    entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
    entry->tunnel_hdr.operation = ADD;

    apply_nested_clone_actions(ctx, xport, out_dev);
    nested_act_flag = is_tunnel_actions_clone_ready(ctx);

    if (nested_act_flag) {
        /* Similar to the stats update in revalidation, the xcache entries
         * populated by the previous translation are used to update the
         * stats correctly.
         */
        if (backup_resubmit_stats) {
            struct dpif_flow_stats tmp_resubmit_stats;
            memcpy(&tmp_resubmit_stats, backup_resubmit_stats,
                   sizeof tmp_resubmit_stats);
            xlate_push_stats(ctx->xin->xcache, &tmp_resubmit_stats);
        }
        xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
    } else {
        /* Combine is not valid. */
        nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
        goto out;
    }
    if (ctx->odp_actions->size > push_action_size) {
        /* Update the CLONE action only when combined. */
        nl_msg_end_nested(ctx->odp_actions, clone_ofs);
    } else {
        nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
        /* XXX: There is no real use-case for a tunnel push without any post
         * actions, but it is kept for now to keep 'make check' happy.  It
         * should be removed once the tunnel test cases do something
         * meaningful with tunnel-encapsulated packets.
         */
        odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
    }

out:
    /* Restore context status. */
    ctx->xin->resubmit_stats = backup_resubmit_stats;
    xlate_cache_delete(ctx->xin->xcache);
    ctx->xin->xcache = backup_xcache;
    ctx->xin->allow_side_effects = backup_side_effects;
    ctx->xin->packet = backup_pkt;
    ctx->wc = backup_flow_wc_ptr;
    return nested_act_flag;
}
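
/* Illustrative sketch (not part of the original sources): the speculative
 * nesting pattern used above.  A nested attribute is started, tentatively
 * filled in, and then either committed with nl_msg_end_nested() or rolled
 * back with nl_msg_cancel_nested() once we know whether the combine was
 * worthwhile.  The 'keep' predicate and the function name are hypothetical
 * stand-ins. */
#if 0
static void
speculative_clone_example(struct ofpbuf *odp_actions, bool keep)
{
    size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_CLONE);

    nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_RECIRC, 0);  /* Tentative. */
    if (keep) {
        nl_msg_end_nested(odp_actions, offset);     /* Commit the nest. */
    } else {
        nl_msg_cancel_nested(odp_actions, offset);  /* Roll it back. */
    }
}
#endif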

static int
build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
                  const struct flow *flow, odp_port_t tunnel_odp_port)
{
    struct netdev_tnl_build_header_params tnl_params;
    struct ovs_action_push_tnl tnl_push_data;
    struct xport *out_dev = NULL;
    ovs_be32 s_ip = 0, d_ip = 0;
    struct in6_addr s_ip6 = in6addr_any;
    struct in6_addr d_ip6 = in6addr_any;
    struct eth_addr smac;
    struct eth_addr dmac;
    int err;
    char buf_sip6[INET6_ADDRSTRLEN];
    char buf_dip6[INET6_ADDRSTRLEN];

    /* Structures to back up the Ethernet and IP data of base_flow. */
    struct flow old_base_flow;
    struct flow old_flow;

    /* Back up flow & base_flow data. */
    memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
    memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);

    err = tnl_route_lookup_flow(flow, &d_ip6, &s_ip6, &out_dev);
    if (err) {
        xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
        return err;
    }

    xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
                 ipv6_string_mapped(buf_dip6, &d_ip6),
                 netdev_get_name(out_dev->netdev));

    /* Use mac addr of bridge port of the peer. */
    err = netdev_get_etheraddr(out_dev->netdev, &smac);
    if (err) {
        xlate_report(ctx, OFT_WARN,
                     "tunnel output device lacks Ethernet address");
        return err;
    }

    d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
    if (d_ip) {
        s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
    }

    err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
    if (err) {
        xlate_report(ctx, OFT_DETAIL,
                     "neighbor cache miss for %s on bridge %s, "
                     "sending %s request",
                     buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
        if (d_ip) {
            tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
        } else {
            tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
        }
        return err;
    }

    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
        ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
                    sizeof entry->tnl_neigh_cache.br_name);
        entry->tnl_neigh_cache.d_ipv6 = d_ip6;
    }

    xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
                 " to "ETH_ADDR_FMT" %s",
                 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
                 ETH_ADDR_ARGS(dmac), buf_dip6);

    netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
    err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
    if (err) {
        return err;
    }
    tnl_push_data.tnl_port = tunnel_odp_port;
    tnl_push_data.out_port = out_dev->odp_port;

    /* After the tunnel header has been added, the MAC and IP data of flow
     * and base_flow need to be set properly, since there is no recirculation
     * any more when sending the packet to the tunnel. */

    propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6, s_ip,
                                  tnl_params.is_ipv6, tnl_push_data.tnl_type);


    /* Try to see if it's possible to apply nested clone actions on the
     * tunnel.  Revert the combined actions on the tunnel if they are not
     * valid.
     */
    if (!validate_and_combine_post_tnl_actions(ctx, xport, out_dev,
                                               tnl_push_data)) {
        /* The datapath is not doing the recirculation now, so make it
         * happen explicitly.
         */
        size_t clone_ofs = nl_msg_start_nested(ctx->odp_actions,
                                               OVS_ACTION_ATTR_CLONE);
        odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
        nl_msg_end_nested(ctx->odp_actions, clone_ofs);
    }
    /* Restore the flows after the translation. */
    memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
    memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
    return 0;
}

static void
xlate_commit_actions(struct xlate_ctx *ctx)
{
    bool use_masked = ctx->xbridge->support.masked_set_action;

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->odp_actions, ctx->wc,
                                          use_masked, ctx->pending_encap,
                                          ctx->encap_data);
    ctx->pending_encap = false;
    ofpbuf_delete(ctx->encap_data);
    ctx->encap_data = NULL;
}

static void
clear_conntrack(struct xlate_ctx *ctx)
{
    ctx->conntracked = false;
    flow_clear_conntrack(&ctx->xin->flow);
}

static bool
xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow,
                        const struct xport *xport_out)
{
    const struct xport *xport_in;

    if (!xport_out) {
        return false;
    }

    xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);

    return (xport_in && xport_in->xbundle && xport_out->xbundle &&
            xport_in->xbundle->protected && xport_out->xbundle->protected);
}

/* Function to combine actions from a following device/port with the current
 * device's actions in the OpenFlow pipeline.  Mainly used for the
 * translation of patch/tunnel port output actions.  It first pushes the
 * OpenFlow state onto a stack, then clears it out to execute the packet
 * through the other device, and finally pops the OpenFlow state back from
 * the stack.  This is equivalent to cloning the packet in translation for
 * the duration of the execution.
 *
 * On output to a patch port, the output action is replaced with a set of
 * nested actions on the peer patch port.  Similarly, on output to a tunnel
 * port, the nested post-tunnel actions are chained up with the tunnel-push
 * action.
 */
static void
apply_nested_clone_actions(struct xlate_ctx *ctx, const struct xport *in_dev,
                           struct xport *out_dev)
{
    struct flow *flow = &ctx->xin->flow;
    struct flow old_flow = ctx->xin->flow;
    struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
    bool old_conntrack = ctx->conntracked;
    bool old_was_mpls = ctx->was_mpls;
    ovs_version_t old_version = ctx->xin->tables_version;
    struct ofpbuf old_stack = ctx->stack;
    uint8_t new_stack[1024];
    struct ofpbuf old_action_set = ctx->action_set;
    struct ovs_list *old_trace = ctx->xin->trace;
    uint64_t actset_stub[1024 / 8];

    ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
    ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
    flow->in_port.ofp_port = out_dev->ofp_port;
    flow->metadata = htonll(0);
    memset(&flow->tunnel, 0, sizeof flow->tunnel);
    memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
    flow->tunnel.metadata.tab =
        ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
    ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
    memset(flow->regs, 0, sizeof flow->regs);
    flow->actset_output = OFPP_UNSET;
    clear_conntrack(ctx);
    ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
                                   out_dev->xbridge->name);
    mirror_mask_t old_mirrors = ctx->mirrors;
    bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
    if (independent_mirrors) {
        ctx->mirrors = 0;
    }
    ctx->xbridge = out_dev->xbridge;

    /* The bridge is now known so obtain its table version. */
    ctx->xin->tables_version
        = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);

    if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
        if (xport_stp_forward_state(out_dev) &&
            xport_rstp_forward_state(out_dev)) {
            xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
                               false, true, clone_xlate_actions);
            if (!ctx->freezing) {
                xlate_action_set(ctx);
            }
            if (ctx->freezing) {
                finish_freezing(ctx);
            }
        } else {
            /* Forwarding is disabled by STP and RSTP.  Let OFPP_NORMAL and
             * the learning action look at the packet, then drop it. */
            struct flow old_base_flow = ctx->base_flow;
            size_t old_size = ctx->odp_actions->size;
            mirror_mask_t old_mirrors2 = ctx->mirrors;

            xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
                               false, true, clone_xlate_actions);
            ctx->mirrors = old_mirrors2;
            ctx->base_flow = old_base_flow;
            ctx->odp_actions->size = old_size;

            /* Undo changes that may have been done for freezing. */
            ctx_cancel_freeze(ctx);
        }
    }

    ctx->xin->trace = old_trace;
    if (independent_mirrors) {
        ctx->mirrors = old_mirrors;
    }
    ctx->xin->flow = old_flow;
    ctx->xbridge = in_dev->xbridge;
    ofpbuf_uninit(&ctx->action_set);
    ctx->action_set = old_action_set;
    ofpbuf_uninit(&ctx->stack);
    ctx->stack = old_stack;

    /* Restore the calling bridge's lookup version. */
    ctx->xin->tables_version = old_version;

    /* Restore the calling bridge's tunneling information. */
    ctx->wc->masks.tunnel = old_flow_tnl_wc;

    /* The out bridge popping MPLS should have no effect on the original
     * bridge. */
    ctx->was_mpls = old_was_mpls;

    /* The out bridge's conntrack execution should have no effect on the
     * original bridge. */
    ctx->conntracked = old_conntrack;

    /* The fact that the out bridge exits (for any reason) does not mean
     * that the original bridge should exit.  Specifically, if the out
     * bridge freezes translation, the original bridge must continue
     * processing with the original, not the frozen packet! */
    ctx->exit = false;

    /* Out bridge errors do not propagate back. */
    ctx->error = XLATE_OK;

    if (ctx->xin->resubmit_stats) {
        netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
        netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
        if (out_dev->bfd) {
            bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
        }
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
        entry->dev.tx = netdev_ref(in_dev->netdev);
        entry->dev.rx = netdev_ref(out_dev->netdev);
        entry->dev.bfd = bfd_ref(out_dev->bfd);
    }
}

static bool
check_output_prerequisites(struct xlate_ctx *ctx,
                           const struct xport *xport,
                           struct flow *flow,
                           bool check_stp)
{
    struct flow_wildcards *wc = ctx->wc;

    if (!xport) {
        xlate_report(ctx, OFT_WARN, "Nonexistent output port");
        return false;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
        return false;
    } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
        xlate_report(ctx, OFT_WARN,
                     "Mirror truncate to ODPP_NONE, skipping output");
        return false;
    } else if (xlate_flow_is_protected(ctx, flow, xport)) {
        xlate_report(ctx, OFT_WARN,
                     "Flow is between protected ports, skipping output.");
        return false;
    } else if (check_stp) {
        if (is_stp(&ctx->base_flow)) {
            if (!xport_stp_should_forward_bpdu(xport) &&
                !xport_rstp_should_manage_bpdu(xport)) {
                if (ctx->xbridge->stp != NULL) {
                    xlate_report(ctx, OFT_WARN,
                                 "STP not in listening state, "
                                 "skipping bpdu output");
                } else if (ctx->xbridge->rstp != NULL) {
                    xlate_report(ctx, OFT_WARN,
                                 "RSTP not managing BPDU in this state, "
                                 "skipping bpdu output");
                }
                return false;
            }
        } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
                   || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
                                                             wc))) {
            /* Pass; STP should not block link health detection. */
        } else if (!xport_stp_forward_state(xport) ||
                   !xport_rstp_forward_state(xport)) {
            if (ctx->xbridge->stp != NULL) {
                xlate_report(ctx, OFT_WARN,
                             "STP not in forwarding state, skipping output");
            } else if (ctx->xbridge->rstp != NULL) {
                xlate_report(ctx, OFT_WARN,
                             "RSTP not in forwarding state, skipping output");
            }
            return false;
        }
    }

    if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
        flow->packet_type != htonl(PT_ETH)) {
        xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
                     "through legacy L2 port. Dropping packet.");
        return false;
    }

    return true;
}

static bool
terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        struct flow *flow, struct flow_wildcards *wc,
                        odp_port_t *tnl_port)
{
    *tnl_port = ODPP_NONE;

    /* XXX: Write better Filter for tunnel port. We can use in_port
     * in tunnel-port flow to avoid these checks completely. */
    if (ofp_port == OFPP_LOCAL &&
        ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
        *tnl_port = tnl_port_map_lookup(flow, wc);
    }

    return *tnl_port != ODPP_NONE;
}

static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        const struct xlate_bond_recirc *xr, bool check_stp,
                        bool is_last_action OVS_UNUSED)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = ctx->wc;
    struct flow *flow = &ctx->xin->flow;
    struct flow_tnl flow_tnl;
    union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port, odp_tnl_port;
    bool is_native_tunnel = false;
    uint8_t dscp;
    struct eth_addr flow_dl_dst = flow->dl_dst;
    struct eth_addr flow_dl_src = flow->dl_src;
    ovs_be32 flow_packet_type = flow->packet_type;
    ovs_be16 flow_dl_type = flow->dl_type;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 40);
    memset(&flow_tnl, 0, sizeof flow_tnl);

    if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
        return;
    }

    if (flow->packet_type == htonl(PT_ETH)) {
        /* Strip Ethernet header for legacy L3 port. */
        if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
            flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                               ntohs(flow->dl_type));
        }
    }

    if (xport->peer) {
        apply_nested_clone_actions(ctx, xport, xport->peer);
        return;
    }

    memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
    flow_nw_tos = flow->nw_tos;

    if (count_skb_priorities(xport)) {
        memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
        if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
            wc->masks.nw_tos |= IP_DSCP_MASK;
            flow->nw_tos &= ~IP_DSCP_MASK;
            flow->nw_tos |= dscp;
        }
    }

    if (xport->is_tunnel) {
        struct in6_addr dst;
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        dst = flow_tnl_dst(&flow->tunnel);
        if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
            xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
            entry->dev.tx = netdev_ref(xport->netdev);
        }
        out_port = odp_port;
        if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
            xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
            is_native_tunnel = true;
        } else {
            xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
            commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
            flow->tunnel = flow_tnl; /* Restore tunnel metadata */
        }
    } else {
        odp_port = xport->odp_port;
        out_port = odp_port;
    }

    if (out_port != ODPP_NONE) {
        /* Commit accumulated flow updates before output. */
        xlate_commit_actions(ctx);

        if (xr) {
            /* Recirculate the packet. */
            struct ovs_action_hash *act_hash;

            /* Hash action. */
            act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                OVS_ACTION_ATTR_HASH,
                                                sizeof *act_hash);
            act_hash->hash_alg = xr->hash_alg;
            act_hash->hash_basis = xr->hash_basis;

            /* Recirc action. */
            nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
                           xr->recirc_id);
        } else if (is_native_tunnel) {
            /* Output to native tunnel port. */
            build_tunnel_send(ctx, xport, flow, odp_port);
            flow->tunnel = flow_tnl; /* Restore tunnel metadata */

        } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
                                           &odp_tnl_port)) {
            /* Intercept packet to be received on native tunnel port. */
            nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
                                odp_tnl_port);

        } else {
            /* Tunnel push-pop action is not compatible with
             * IPFIX action. */
            compose_ipfix_action(ctx, out_port);

            /* Handle truncation of the mirrored packet. */
            if (ctx->mirror_snaplen > 0 &&
                ctx->mirror_snaplen < UINT16_MAX) {
                struct ovs_action_trunc *trunc;

                trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                 OVS_ACTION_ATTR_TRUNC,
                                                 sizeof *trunc);
                trunc->max_len = ctx->mirror_snaplen;
                if (!ctx->xbridge->support.trunc) {
                    ctx->xout->slow |= SLOW_ACTION;
                }
            }

            nl_msg_put_odp_port(ctx->odp_actions,
                                OVS_ACTION_ATTR_OUTPUT,
                                out_port);
        }

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->nf_output_iface = ofp_port;
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        mirror_packet(ctx, xport->xbundle,
                      xbundle_mirror_dst(xport->xbundle->xbridge,
                                         xport->xbundle));
    }

out:
    /* Restore flow */
    memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
    flow->nw_tos = flow_nw_tos;
    flow->dl_dst = flow_dl_dst;
    flow->dl_src = flow_dl_src;
    flow->packet_type = flow_packet_type;
    flow->dl_type = flow_dl_type;
}
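
/* Illustrative sketch (not part of the original sources): the hash-then-
 * recirculate action pair emitted above for bond recirculation, in
 * isolation.  OVS_HASH_ALG_L4 is used here as a plausible algorithm; the
 * real code takes the algorithm, basis, and recirc id from 'xr'.  The
 * function name is hypothetical. */
#if 0
static void
emit_hash_recirc_example(struct ofpbuf *odp_actions, uint32_t recirc_id)
{
    struct ovs_action_hash *act_hash;

    act_hash = nl_msg_put_unspec_uninit(odp_actions, OVS_ACTION_ATTR_HASH,
                                        sizeof *act_hash);
    act_hash->hash_alg = OVS_HASH_ALG_L4;   /* Assumed algorithm. */
    act_hash->hash_basis = 0;
    nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
}
#endif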

static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                      const struct xlate_bond_recirc *xr,
                      bool is_last_action)
{
    compose_output_action__(ctx, ofp_port, xr, true, is_last_action);
}

static void
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
                  bool deepens, bool is_last_action,
                  xlate_actions_handler *actions_xlator)
{
    struct rule_dpif *old_rule = ctx->rule;
    ovs_be64 old_cookie = ctx->rule_cookie;
    const struct rule_actions *actions;

    if (ctx->xin->resubmit_stats) {
        rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
    }

    ctx->resubmits++;

    ctx->depth += deepens;
    ctx->rule = rule;
    ctx->rule_cookie = rule->up.flow_cookie;
    actions = rule_get_actions(&rule->up);
    actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
                   is_last_action);
    ctx->rule_cookie = old_cookie;
    ctx->rule = old_rule;
    ctx->depth -= deepens;
}

static bool
xlate_resubmit_resource_check(struct xlate_ctx *ctx)
{
    if (ctx->depth >= MAX_DEPTH) {
        xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
        ctx->error = XLATE_RECURSION_TOO_DEEP;
    } else if (ctx->resubmits >= MAX_RESUBMITS) {
        xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
        ctx->error = XLATE_TOO_MANY_RESUBMITS;
    } else if (ctx->odp_actions->size > UINT16_MAX) {
        xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
        /* NOT an error, as we'll be slow-pathing the flow in this case? */
        ctx->exit = true; /* XXX: translation still terminated! */
    } else if (ctx->stack.size >= 65536) {
        xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
        ctx->error = XLATE_STACK_TOO_DEEP;
    } else {
        return true;
    }

    return false;
}

static void
tuple_swap_flow(struct flow *flow, bool ipv4)
{
    uint8_t nw_proto = flow->nw_proto;
    flow->nw_proto = flow->ct_nw_proto;
    flow->ct_nw_proto = nw_proto;

    if (ipv4) {
        ovs_be32 nw_src = flow->nw_src;
        flow->nw_src = flow->ct_nw_src;
        flow->ct_nw_src = nw_src;

        ovs_be32 nw_dst = flow->nw_dst;
        flow->nw_dst = flow->ct_nw_dst;
        flow->ct_nw_dst = nw_dst;
    } else {
        struct in6_addr ipv6_src = flow->ipv6_src;
        flow->ipv6_src = flow->ct_ipv6_src;
        flow->ct_ipv6_src = ipv6_src;

        struct in6_addr ipv6_dst = flow->ipv6_dst;
        flow->ipv6_dst = flow->ct_ipv6_dst;
        flow->ct_ipv6_dst = ipv6_dst;
    }

    ovs_be16 tp_src = flow->tp_src;
    flow->tp_src = flow->ct_tp_src;
    flow->ct_tp_src = tp_src;

    ovs_be16 tp_dst = flow->tp_dst;
    flow->tp_dst = flow->ct_tp_dst;
    flow->ct_tp_dst = tp_dst;
}

static void
tuple_swap(struct flow *flow, struct flow_wildcards *wc)
{
    bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));

    tuple_swap_flow(flow, ipv4);
    tuple_swap_flow(&wc->masks, ipv4);
}

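/* Illustrative sketch (not part of the original sources): tuple_swap() is
 * its own inverse, so applying it twice must leave the flow untouched.  A
 * minimal check of that property, assuming a zero-initialized flow with
 * just a partial 5-tuple filled in; the function name is hypothetical. */
#if 0
static void
tuple_swap_involution_check(void)
{
    struct flow flow, orig;

    memset(&flow, 0, sizeof flow);
    flow.dl_type = htons(ETH_TYPE_IP);
    flow.nw_proto = IPPROTO_TCP;
    flow.nw_src = htonl(0x0a000001);    /* 10.0.0.1 */
    flow.ct_nw_src = htonl(0x0a000002); /* 10.0.0.2 (CT original tuple). */
    orig = flow;

    tuple_swap_flow(&flow, true);
    tuple_swap_flow(&flow, true);
    ovs_assert(!memcmp(&flow, &orig, sizeof flow));
}
#endif
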
static void
xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                   bool may_packet_in, bool honor_table_miss,
                   bool with_ct_orig, bool is_last_action,
                   xlate_actions_handler *xlator)
{
    /* Check if we need to recirculate before matching in a table. */
    if (ctx->was_mpls) {
        ctx_trigger_freeze(ctx);
        return;
    }
    if (xlate_resubmit_resource_check(ctx)) {
        uint8_t old_table_id = ctx->table_id;
        struct rule_dpif *rule;

        ctx->table_id = table_id;

        /* Swap packet fields with CT 5-tuple if requested. */
        if (with_ct_orig) {
            /* Do not swap if there is no CT tuple, or if key is not IP. */
            if (ctx->xin->flow.ct_nw_proto == 0 ||
                !is_ip_any(&ctx->xin->flow)) {
                xlate_report_error(ctx,
                    "resubmit(ct) with non-tracked or non-IP packet!");
                return;
            }
            tuple_swap(&ctx->xin->flow, ctx->wc);
        }
        rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
                                           ctx->xin->tables_version,
                                           &ctx->xin->flow, ctx->wc,
                                           ctx->xin->resubmit_stats,
                                           &ctx->table_id, in_port,
                                           may_packet_in, honor_table_miss,
                                           ctx->xin->xcache);
        /* Swap back. */
        if (with_ct_orig) {
            tuple_swap(&ctx->xin->flow, ctx->wc);
        }

        if (rule) {
            /* Fill in the cache entry here instead of xlate_recursively
             * to make the reference counting more explicit.  We take a
             * reference in the lookups above if we are going to cache the
             * rule. */
            if (ctx->xin->xcache) {
                struct xc_entry *entry;

                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
                entry->rule = rule;
                ofproto_rule_ref(&rule->up);
            }

            struct ovs_list *old_trace = ctx->xin->trace;
            xlate_report_table(ctx, rule, table_id);
            xlate_recursively(ctx, rule, table_id <= old_table_id,
                              is_last_action, xlator);
            ctx->xin->trace = old_trace;
        }

        ctx->table_id = old_table_id;
        return;
    }
}

/* Consumes the group reference, which is only taken if xcache exists. */
static void
xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
                  struct ofputil_bucket *bucket)
{
    if (ctx->xin->resubmit_stats) {
        group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
        entry->group.group = group;
        entry->group.bucket = bucket;
    }
}

static void
xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
                   bool is_last_action)
{
    uint64_t action_list_stub[1024 / 8];
    struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
    struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
                                                        bucket->ofpacts_len);
    struct flow old_flow = ctx->xin->flow;
    bool old_was_mpls = ctx->was_mpls;

    ofpacts_execute_action_set(&action_list, &action_set);
    ctx->depth++;
    do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action);
    ctx->depth--;

    ofpbuf_uninit(&action_list);

    /* Check if we need to freeze. */
    if (ctx->freezing) {
        finish_freezing(ctx);
    }

    /* Roll back flow to previous state.
     * This is equivalent to cloning the packet for each bucket.
     *
     * As a side effect any subsequently applied actions will
     * also effectively be applied to a clone of the packet taken
     * just before applying the all or indirect group.
     *
     * Note that group buckets are action sets, hence they cannot modify the
     * main action set.  Also any stack actions are ignored when executing an
     * action set, so group buckets cannot change the stack either.
     * However, we do allow resubmit actions in group buckets, which could
     * break the above assumptions.  It is up to the controller not to mess
     * with the action_set and stack in the tables resubmitted to from
     * group buckets. */
    ctx->xin->flow = old_flow;

    /* The group bucket popping MPLS should have no effect after bucket
     * execution. */
    ctx->was_mpls = old_was_mpls;

    /* The fact that the group bucket exits (for any reason) does not mean
     * that the translation after the group action should exit.  Specifically,
     * if the group bucket freezes translation, the actions after the group
     * action must continue processing with the original, not the frozen
     * packet! */
    ctx->exit = false;
}

static void
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group,
                bool is_last_action)
{
    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        bool last = is_last_action && !bucket->list_node.next;
        xlate_group_bucket(ctx, bucket, last);
    }
    xlate_group_stats(ctx, group, NULL);
}

static void
xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group,
               bool is_last_action)
{
    struct ofputil_bucket *bucket;

    bucket = group_first_live_bucket(ctx, group, 0);
    if (bucket) {
        xlate_group_bucket(ctx, bucket, is_last_action);
        xlate_group_stats(ctx, group, bucket);
    } else if (ctx->xin->xcache) {
        ofproto_group_unref(&group->up);
    }
}

static void
xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
                           bool is_last_action)
{
    struct flow_wildcards *wc = ctx->wc;
    struct ofputil_bucket *bucket;
    uint32_t basis;

    basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
    flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
    bucket = group_best_live_bucket(ctx, group, basis);
    if (bucket) {
        xlate_group_bucket(ctx, bucket, is_last_action);
        xlate_group_stats(ctx, group, bucket);
    } else if (ctx->xin->xcache) {
        ofproto_group_unref(&group->up);
    }
}

static void
xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
                               bool is_last_action)
{
    const struct field_array *fields = &group->up.props.fields;
    const uint8_t *mask_values = fields->values;
    uint32_t basis = hash_uint64(group->up.props.selection_method_param);

    size_t i;
    BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
        const struct mf_field *mf = mf_from_id(i);

        /* Skip fields for which prerequisites are not met. */
        if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
            /* Skip the mask bytes for this field. */
            mask_values += mf->n_bytes;
            continue;
        }

        union mf_value value;
        union mf_value mask;

        mf_get_value(mf, &ctx->xin->flow, &value);
        /* Mask the value. */
        for (int j = 0; j < mf->n_bytes; j++) {
            mask.b[j] = *mask_values++;
            value.b[j] &= mask.b[j];
        }
        basis = hash_bytes(&value, mf->n_bytes, basis);

        /* For tunnels, hash in whether the field is present. */
        if (mf_is_tun_metadata(mf)) {
            basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
        }

        mf_mask_field_masked(mf, &mask, ctx->wc);
    }

    struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
    if (bucket) {
        xlate_group_bucket(ctx, bucket, is_last_action);
        xlate_group_stats(ctx, group, bucket);
    } else if (ctx->xin->xcache) {
        ofproto_group_unref(&group->up);
    }
}

static void
xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
                           bool is_last_action)
{
    struct ofputil_bucket *bucket;

    /* dp_hash value 0 is special since it means that the dp_hash has not
     * been computed, as all computed dp_hash values are non-zero.  Therefore
     * a comparison with zero can be used to decide whether the dp_hash value
     * is valid without masking the dp_hash field. */
    if (!ctx->xin->flow.dp_hash) {
        uint64_t param = group->up.props.selection_method_param;

        ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
    } else {
        uint32_t n_buckets = group->up.n_buckets;
        if (n_buckets) {
            /* Minimal mask to cover the number of buckets. */
            uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
            /* Multiplier chosen to make the trivial 1-bit case actually
             * distribute amongst two equal-weight buckets. */
            uint32_t basis = 0xc2b73583 * (ctx->xin->flow.dp_hash & mask);

            ctx->wc->masks.dp_hash |= mask;
            bucket = group_best_live_bucket(ctx, group, basis);
            if (bucket) {
                xlate_group_bucket(ctx, bucket, is_last_action);
                xlate_group_stats(ctx, group, bucket);
            }
        }
    }
}
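
/* Illustrative sketch (not part of the original sources): the bucket mask
 * math above in small numbers.  With n_buckets == 5, log_2_ceil(5) == 3, so
 * mask == 0x7: dp_hash & mask yields one of 8 slots, which
 * group_best_live_bucket() then maps onto the 5 live buckets.  The function
 * name below is hypothetical. */
#if 0
static void
dp_hash_mask_example(void)
{
    uint32_t n_buckets = 5;
    uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;

    ovs_assert(mask == 0x7);
    ovs_assert((0xdeadbeef & mask) < 8);  /* Any hash lands in 8 slots. */
}
#endif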

static void
xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
                   bool is_last_action)
{
    const char *selection_method = group->up.props.selection_method;

    /* Select groups may access flow keys beyond L2 in order to
     * select a bucket.  Recirculate as appropriate to make this possible.
     */
    if (ctx->was_mpls) {
        ctx_trigger_freeze(ctx);
    }

    if (selection_method[0] == '\0') {
        xlate_default_select_group(ctx, group, is_last_action);
    } else if (!strcasecmp("hash", selection_method)) {
        xlate_hash_fields_select_group(ctx, group, is_last_action);
    } else if (!strcasecmp("dp_hash", selection_method)) {
        xlate_dp_hash_select_group(ctx, group, is_last_action);
    } else {
        /* Parsing of groups should ensure this never happens. */
        OVS_NOT_REACHED();
    }
}

static void
xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
                     bool is_last_action)
{
    bool was_in_group = ctx->in_group;
    ctx->in_group = true;

    switch (group->up.type) {
    case OFPGT11_ALL:
    case OFPGT11_INDIRECT:
        xlate_all_group(ctx, group, is_last_action);
        break;
    case OFPGT11_SELECT:
        xlate_select_group(ctx, group, is_last_action);
        break;
    case OFPGT11_FF:
        xlate_ff_group(ctx, group, is_last_action);
        break;
    default:
        OVS_NOT_REACHED();
    }

    ctx->in_group = was_in_group;
}

static bool
xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
                   bool is_last_action)
{
    if (xlate_resubmit_resource_check(ctx)) {
        struct group_dpif *group;

        /* Take ref only if xcache exists. */
        group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
                                  ctx->xin->tables_version, ctx->xin->xcache);
        if (!group) {
            /* XXX: Should set ctx->error ? */
            xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
                         group_id);
            return true;
        }
        xlate_group_action__(ctx, group, is_last_action);
    }

    return false;
}

static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit,
                      bool is_last_action)
{
    ofp_port_t in_port;
    uint8_t table_id;
    bool may_packet_in = false;
    bool honor_table_miss = false;

    if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
        /* Still allow missed packets to be sent to the controller
         * if resubmitting from an internal table. */
        may_packet_in = true;
        honor_table_miss = true;
    }

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port.ofp_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, may_packet_in,
                       honor_table_miss, resubmit->with_ct_orig,
                       is_last_action, do_xlate_actions);
}

static void
flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
                     bool all, bool is_last_action)
{
    if (!xport) {
        return;
    }

    if (all) {
        compose_output_action__(ctx, xport->ofp_port, NULL, false,
                                is_last_action);
    } else {
        compose_output_action(ctx, xport->ofp_port, NULL, is_last_action);
    }
}

static void
flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
{
    const struct xport *xport, *last = NULL;

    /* Use 'last' to keep track of the last output port. */
    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
            continue;
        }

        if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
            /* The port remembered in 'last' is not the last port after
             * all; send a packet out to it and remember this one instead. */
            flood_packet_to_port(ctx, last, all, false);
            last = xport;
        }
    }

    /* Send the packet to the 'last' port. */
    flood_packet_to_port(ctx, last, all, is_last_action);
    ctx->nf_output_iface = NF_OUT_FLOOD;
}
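
/* Illustrative sketch (not part of the original sources): the deferred-last
 * iteration pattern used by flood_packets() above, in isolation.  Each item
 * is processed one iteration late so that the final call can be told it is
 * the last one; 'process' and the function name are hypothetical. */
#if 0
static void
deferred_last_example(const int *items, size_t n,
                      void (*process)(int item, bool is_last))
{
    const int *last = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        if (last) {
            process(*last, false);  /* Definitely not the last item. */
        }
        last = &items[i];
    }
    if (last) {
        process(*last, true);       /* The true last item. */
    }
}
#endif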

/* Copy and reformat partially xlated odp actions into a new odp actions list
 * in 'b', so that the new actions list can be executed by
 * odp_execute_actions.
 *
 * When xlating with nested odp actions, such as sample and clone, the
 * nested action created by nl_msg_start_nested() may not have been
 * properly closed yet, and thus can not be executed directly.
 *
 * Since an unclosed nested action has to be the last action, it can be
 * fixed by skipping the outer header, and treating the actions within
 * as if they are outside the nested attribute, since the effect
 * of executing them on the packet is the same.
 *
 * As an optimization, a fully closed 'sample' or 'clone' action
 * is skipped since its execution has no effect on the packet.
 *
 * Returns true on success; 'b' contains the new actions list.
 * The caller is responsible for disposing of 'b'.
 *
 * Returns false on error; 'b' has been freed already. */
static bool
xlate_fixup_actions(struct ofpbuf *b, const struct nlattr *actions,
                    size_t actions_len)
{
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, actions, actions_len) {
        int type = nl_attr_type(a);

        switch ((enum ovs_action_attr) type) {
        case OVS_ACTION_ATTR_HASH:
        case OVS_ACTION_ATTR_PUSH_VLAN:
        case OVS_ACTION_ATTR_POP_VLAN:
        case OVS_ACTION_ATTR_PUSH_MPLS:
        case OVS_ACTION_ATTR_POP_MPLS:
        case OVS_ACTION_ATTR_SET:
        case OVS_ACTION_ATTR_SET_MASKED:
        case OVS_ACTION_ATTR_TRUNC:
        case OVS_ACTION_ATTR_OUTPUT:
        case OVS_ACTION_ATTR_TUNNEL_PUSH:
        case OVS_ACTION_ATTR_TUNNEL_POP:
        case OVS_ACTION_ATTR_USERSPACE:
        case OVS_ACTION_ATTR_RECIRC:
        case OVS_ACTION_ATTR_CT:
        case OVS_ACTION_ATTR_PUSH_ETH:
        case OVS_ACTION_ATTR_POP_ETH:
        case OVS_ACTION_ATTR_ENCAP_NSH:
        case OVS_ACTION_ATTR_DECAP_NSH:
        case OVS_ACTION_ATTR_METER:
            ofpbuf_put(b, a, nl_attr_len_pad(a, left));
            break;

        case OVS_ACTION_ATTR_CLONE:
            /* If the clone action has been fully xlated, it can
             * be skipped, since any actions executed within clone
             * do not affect the current packet.
             *
             * When xlating actions within clone, the clone action,
             * because it is a nested netlink attribute, does not have
             * a valid 'nla_len'; it will be zero instead.  Skip
             * the clone header to find the start of the actions
             * enclosed.  Treat those actions as if they are written
             * outside of clone. */
            if (!a->nla_len) {
                bool ok;
                if (left < NLA_HDRLEN) {
                    goto error;
                }

                ok = xlate_fixup_actions(b, nl_attr_get_unspec(a, 0),
                                         left - NLA_HDRLEN);
                if (!ok) {
                    goto error;
                }
            }
            break;

        case OVS_ACTION_ATTR_SAMPLE:
            if (!a->nla_len) {
                bool ok;
                if (left < NLA_HDRLEN) {
                    goto error;
                }
                const struct nlattr *attr = nl_attr_get_unspec(a, 0);
                left -= NLA_HDRLEN;

                while (left > 0 &&
                       nl_attr_type(attr) != OVS_SAMPLE_ATTR_ACTIONS) {
                    /* Only OVS_SAMPLE_ATTR_ACTIONS can have an unclosed
                     * nested netlink attribute. */
                    if (!attr->nla_len) {
                        goto error;
                    }

                    left -= NLA_ALIGN(attr->nla_len);
                    attr = nl_attr_next(attr);
                }

                if (left < NLA_HDRLEN) {
                    goto error;
                }

                ok = xlate_fixup_actions(b, nl_attr_get_unspec(attr, 0),
                                         left - NLA_HDRLEN);
                if (!ok) {
                    goto error;
                }
            }
            break;

        case OVS_ACTION_ATTR_UNSPEC:
        case __OVS_ACTION_ATTR_MAX:
            OVS_NOT_REACHED();
        }
    }

    return true;

error:
    ofpbuf_delete(b);
    return false;
}

static bool
xlate_execute_odp_actions(struct dp_packet *packet,
                          const struct nlattr *actions, int actions_len)
{
    struct dp_packet_batch batch;
    struct ofpbuf *b = ofpbuf_new(actions_len);

    if (!xlate_fixup_actions(b, actions, actions_len)) {
        return false;
    }

    dp_packet_batch_init_packet(&batch, packet);
    odp_execute_actions(NULL, &batch, false, b->data, b->size, NULL);
    ofpbuf_delete(b);

    return true;
}

static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id,
                          const uint8_t *userdata, size_t userdata_len)
{
    struct dp_packet *packet;

    ctx->xout->slow |= SLOW_CONTROLLER;
    xlate_commit_actions(ctx);
    if (!ctx->xin->packet) {
        return;
    }

    if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
        return;
    }

    packet = dp_packet_clone(ctx->xin->packet);
    if (!xlate_execute_odp_actions(packet, ctx->odp_actions->data,
                                   ctx->odp_actions->size)) {
        xlate_report_error(ctx, "Failed to execute controller action");
        dp_packet_delete(packet);
        return;
    }

    /* A packet sent by an action in a table-miss rule is considered an
     * explicit table miss.  OpenFlow before 1.3 doesn't have that concept so
     * it will get translated back to OFPR_ACTION for those versions. */
    if (reason == OFPR_ACTION
        && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
        reason = OFPR_EXPLICIT_MISS;
    }

    size_t packet_len = dp_packet_size(packet);

    struct ofproto_async_msg *am = xmalloc(sizeof *am);
    *am = (struct ofproto_async_msg) {
        .controller_id = controller_id,
        .oam = OAM_PACKET_IN,
        .pin = {
            .up = {
                .base = {
                    .packet = dp_packet_steal_data(packet),
                    .packet_len = packet_len,
                    .reason = reason,
                    .table_id = ctx->table_id,
                    .cookie = ctx->rule_cookie,
                    .userdata = (userdata_len
                                 ? xmemdup(userdata, userdata_len)
                                 : NULL),
                    .userdata_len = userdata_len,
                }
            },
            .max_len = len,
        },
    };
    flow_get_metadata(&ctx->xin->flow, &am->pin.up.base.flow_metadata);

    /* Async messages are only sent once, so if we send one now, no
     * xlate cache entry is created. */
    if (ctx->xin->allow_side_effects) {
        ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
    } else /* xcache */ {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
        entry->controller.ofproto = ctx->xbridge->ofproto;
        entry->controller.am = am;
    }

    dp_packet_delete(packet);
}
4611
7bbdd84f 4612static void
77ab5fd2 4613emit_continuation(struct xlate_ctx *ctx, const struct frozen_state *state)
7bbdd84f 4614{
df70a773
JR
4615 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4616 return;
4617 }
4618
77ab5fd2
BP
4619 struct ofproto_async_msg *am = xmalloc(sizeof *am);
4620 *am = (struct ofproto_async_msg) {
4621 .controller_id = ctx->pause->controller_id,
4622 .oam = OAM_PACKET_IN,
4623 .pin = {
4624 .up = {
4d617a87 4625 .base = {
77ab5fd2
BP
4626 .userdata = xmemdup(ctx->pause->userdata,
4627 ctx->pause->userdata_len),
4628 .userdata_len = ctx->pause->userdata_len,
4629 .packet = xmemdup(dp_packet_data(ctx->xin->packet),
4630 dp_packet_size(ctx->xin->packet)),
4631 .packet_len = dp_packet_size(ctx->xin->packet),
0b024e49 4632 .reason = ctx->pause->reason,
77ab5fd2 4633 },
07a3cd5c 4634 .bridge = ctx->xbridge->ofproto->uuid,
84cf3c1f
JR
4635 .stack = xmemdup(state->stack, state->stack_size),
4636 .stack_size = state->stack_size,
77ab5fd2
BP
4637 .mirrors = state->mirrors,
4638 .conntracked = state->conntracked,
4639 .actions = xmemdup(state->ofpacts, state->ofpacts_len),
4640 .actions_len = state->ofpacts_len,
4641 .action_set = xmemdup(state->action_set,
4642 state->action_set_len),
4643 .action_set_len = state->action_set_len,
4644 },
4645 .max_len = UINT16_MAX,
4646 },
4647 };
4d617a87 4648 flow_get_metadata(ctx->paused_flow, &am->pin.up.base.flow_metadata);
df70a773
JR
4649
4650 /* Async messages are only sent once, so if we send one now, no
4651 * xlate cache entry is created. */
4652 if (ctx->xin->allow_side_effects) {
4653 ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
4654 } else /* xcache */ {
4655 struct xc_entry *entry;
4656
4657 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
4658 entry->controller.ofproto = ctx->xbridge->ofproto;
4659 entry->controller.am = am;
4660 }
77ab5fd2 4661}
7bbdd84f 4662
e6bc8e74
YHW
4663/* Creates a frozen state and allocates a unique recirc id for the given
4664 * state. Returns a non-zero recirc id if allocation succeeds.
4665 * Returns 0 otherwise.
4666 */
4667static uint32_t
77ab5fd2
BP
4668finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4669{
e6bc8e74 4670 uint32_t id = 0;
1d361a81 4671 ovs_assert(ctx->freezing);
7bbdd84f 4672
1d361a81 4673 struct frozen_state state = {
07659514 4674 .table_id = table,
07a3cd5c 4675 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5c1b2314 4676 .stack = ctx->stack.data,
84cf3c1f 4677 .stack_size = ctx->stack.size,
29bae541 4678 .mirrors = ctx->mirrors,
07659514 4679 .conntracked = ctx->conntracked,
1d361a81
BP
4680 .ofpacts = ctx->frozen_actions.data,
4681 .ofpacts_len = ctx->frozen_actions.size,
417509fa 4682 .action_set = ctx->action_set.data,
8a5fb3b4 4683 .action_set_len = ctx->action_set.size,
2082425c 4684 };
77ab5fd2 4685 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
2082425c 4686
77ab5fd2
BP
4687 if (ctx->pause) {
4688 if (ctx->xin->packet) {
4689 emit_continuation(ctx, &state);
4690 }
4691 } else {
4692 /* Allocate a unique recirc id for the given metadata state in the
4693 * flow. An existing id, with a new reference to the corresponding
4694 * recirculation context, will be returned if possible.
4695 * The life-cycle of this recirc id is managed by associating it
4696 * with the udpif key ('ukey') created for each new datapath flow. */
e6bc8e74 4697 id = recirc_alloc_id_ctx(&state);
77ab5fd2 4698 if (!id) {
2d9b49dd 4699 xlate_report_error(ctx, "Failed to allocate recirculation id");
77ab5fd2 4700 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
e6bc8e74 4701 return 0;
77ab5fd2
BP
4702 }
4703 recirc_refs_add(&ctx->xout->recircs, id);
7bbdd84f 4704
53cc166a
JR
4705 if (ctx->recirc_update_dp_hash) {
4706 struct ovs_action_hash *act_hash;
4707
4708 /* Hash action. */
4709 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4710 OVS_ACTION_ATTR_HASH,
4711 sizeof *act_hash);
4712 act_hash->hash_alg = OVS_HASH_ALG_L4; /* Make configurable. */
4713 act_hash->hash_basis = 0; /* Make configurable. */
4714 }
77ab5fd2
BP
4715 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
4716 }
e672ff9b 4717
1d361a81
BP
4718 /* Undo changes done by freezing. */
4719 ctx_cancel_freeze(ctx);
e6bc8e74 4720 return id;
7bbdd84f
SH
4721}
4722
1d361a81 4723/* Called only when we're freezing. */
07659514 4724static void
77ab5fd2 4725finish_freezing(struct xlate_ctx *ctx)
07659514
JS
4726{
4727 xlate_commit_actions(ctx);
77ab5fd2 4728 finish_freezing__(ctx, 0);
07659514
JS
4729}
4730
e37b8437
JS
4731/* Fork the pipeline here. The current packet will continue processing the
4732 * current action list. A clone of the current packet will recirculate, skip
4733 * the remainder of the current action list and asynchronously resume pipeline
4734 * processing in 'table' with the current metadata and action set. */
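/* Illustrative example: for the actions "ct(table=5), output:1" the original
 * packet continues on to output:1, while the forked clone re-enters the
 * pipeline at table 5 once the conntrack lookup has completed. */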
4735static void
4736compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table)
4737{
e6bc8e74 4738 uint32_t recirc_id;
1d361a81 4739 ctx->freezing = true;
e6bc8e74
YHW
4740 recirc_id = finish_freezing__(ctx, table);
4741
4742 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4743 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4744 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
4745 ctx->xin->packet, recirc_id)) {
4746 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4747 "recirculate. The forked pipeline will be resumed at "
4748 "table %u.", table);
4749 } else {
4750 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4751 "forked pipeline with recirc_id = %d.", recirc_id);
4752 }
4753 }
e37b8437
JS
4754}
4755
8bfd0fda
BP
4756static void
4757compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
9583bc14 4758{
33bf9176 4759 struct flow *flow = &ctx->xin->flow;
8bfd0fda 4760 int n;
33bf9176 4761
8bfd0fda 4762 ovs_assert(eth_type_mpls(mpls->ethertype));
b0a17866 4763
49a73e0c 4764 n = flow_count_mpls_labels(flow, ctx->wc);
8bfd0fda 4765 if (!n) {
704bb0bf 4766 xlate_commit_actions(ctx);
8bfd0fda
BP
4767 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4768 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4769 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4770 "action can't be performed as it would have "
4771 "more MPLS LSEs than the %d supported.",
4772 FLOW_MAX_MPLS_LABELS);
9583bc14 4773 }
fff1b9c0 4774 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
8bfd0fda 4775 return;
9583bc14 4776 }
b0a17866 4777
742c0ac3
JR
4778 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4779 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
9583bc14
EJ
4780}
4781
8bfd0fda 4782static void
9cfef3d0 4783compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 4784{
8bfd0fda 4785 struct flow *flow = &ctx->xin->flow;
49a73e0c 4786 int n = flow_count_mpls_labels(flow, ctx->wc);
33bf9176 4787
49a73e0c 4788 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
8bf009bf 4789 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
e12ec36b 4790 ctx->was_mpls = true;
7bbdd84f
SH
4791 }
4792 } else if (n >= FLOW_MAX_MPLS_LABELS) {
8bfd0fda 4793 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
4794 xlate_report_error(ctx, "dropping packet on which an "
4795 "MPLS pop action can't be performed as it has "
4796 "more MPLS LSEs than the %d supported.",
4797 FLOW_MAX_MPLS_LABELS);
8bfd0fda 4798 }
fff1b9c0 4799 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
1520ef4f 4800 ofpbuf_clear(ctx->odp_actions);
9583bc14
EJ
4801 }
4802}
4803
4804static bool
4805compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4806{
33bf9176
BP
4807 struct flow *flow = &ctx->xin->flow;
4808
4809 if (!is_ip_any(flow)) {
9583bc14
EJ
4810 return false;
4811 }
4812
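 /* The decision below depends on the IP TTL, so the resulting megaflow must
 * match on the full TTL byte; record that in the wildcards. */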
49a73e0c 4813 ctx->wc->masks.nw_ttl = 0xff;
33bf9176
BP
4814 if (flow->nw_ttl > 1) {
4815 flow->nw_ttl--;
9583bc14
EJ
4816 return false;
4817 } else {
4818 size_t i;
4819
4820 for (i = 0; i < ids->n_controllers; i++) {
4821 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
bdcad671 4822 ids->cnt_ids[i], NULL, 0);
9583bc14
EJ
4823 }
4824
4825 /* Stop processing for current table. */
2d9b49dd
BP
4826 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4827 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
9583bc14
EJ
4828 return true;
4829 }
4830}
4831
8bfd0fda 4832static void
097d4939
JR
4833compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
4834{
8bfd0fda 4835 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4836 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
8bfd0fda 4837 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
097d4939 4838 }
097d4939
JR
4839}
4840
8bfd0fda 4841static void
097d4939
JR
4842compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
4843{
8bfd0fda 4844 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4845 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
8bfd0fda 4846 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
097d4939 4847 }
097d4939
JR
4848}
4849
8bfd0fda 4850static void
9cfef3d0 4851compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
9583bc14 4852{
8bfd0fda 4853 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 4854 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda 4855 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
b0a17866 4856 }
9583bc14
EJ
4857}
4858
4859static bool
9cfef3d0 4860compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
9583bc14 4861{
33bf9176 4862 struct flow *flow = &ctx->xin->flow;
1dd35f8a 4863
8bfd0fda 4864 if (eth_type_mpls(flow->dl_type)) {
22d38fca
JR
4865 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
4866
49a73e0c 4867 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda
BP
4868 if (ttl > 1) {
4869 ttl--;
4870 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
4871 return false;
4872 } else {
bdcad671
BP
4873 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
4874 NULL, 0);
8bfd0fda 4875 }
9583bc14 4876 }
22d38fca
JR
4877
4878 /* Stop processing for current table. */
2d9b49dd 4879 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
22d38fca 4880 return true;
9583bc14
EJ
4881}
4882
4883static void
4884xlate_output_action(struct xlate_ctx *ctx,
feee58b9
AZ
4885 ofp_port_t port, uint16_t max_len, bool may_packet_in,
4886 bool is_last_action)
9583bc14 4887{
2031ef97 4888 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
9583bc14 4889
2031ef97 4890 ctx->nf_output_iface = NF_OUT_DROP;
9583bc14
EJ
4891
4892 switch (port) {
4893 case OFPP_IN_PORT:
feee58b9
AZ
4894 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
4895 is_last_action);
9583bc14
EJ
4896 break;
4897 case OFPP_TABLE:
4e022ec0 4898 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
96c3a6e5
AZ
4899 0, may_packet_in, true, false, is_last_action,
4900 do_xlate_actions);
9583bc14
EJ
4901 break;
4902 case OFPP_NORMAL:
4903 xlate_normal(ctx);
4904 break;
4905 case OFPP_FLOOD:
feee58b9 4906 flood_packets(ctx, false, is_last_action);
9583bc14
EJ
4907 break;
4908 case OFPP_ALL:
feee58b9 4909 flood_packets(ctx, true, is_last_action);
9583bc14
EJ
4910 break;
4911 case OFPP_CONTROLLER:
3a11fd5b 4912 execute_controller_action(ctx, max_len,
331c07ac
YHW
4913 (ctx->in_packet_out ? OFPR_PACKET_OUT
4914 : ctx->in_group ? OFPR_GROUP
029ca940
SS
4915 : ctx->in_action_set ? OFPR_ACTION_SET
4916 : OFPR_ACTION),
bdcad671 4917 0, NULL, 0);
9583bc14
EJ
4918 break;
4919 case OFPP_NONE:
4920 break;
4921 case OFPP_LOCAL:
4922 default:
4e022ec0 4923 if (port != ctx->xin->flow.in_port.ofp_port) {
feee58b9 4924 compose_output_action(ctx, port, NULL, is_last_action);
9583bc14 4925 } else {
2d9b49dd 4926 xlate_report(ctx, OFT_WARN, "skipping output to input port");
9583bc14
EJ
4927 }
4928 break;
4929 }
4930
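 /* In short: flooding wins outright, a single output keeps that port, and
 * two different non-flood outputs collapse into NF_OUT_MULTI. */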
4931 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2031ef97
BP
4932 ctx->nf_output_iface = NF_OUT_FLOOD;
4933 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4934 ctx->nf_output_iface = prev_nf_output_iface;
9583bc14 4935 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2031ef97
BP
4936 ctx->nf_output_iface != NF_OUT_FLOOD) {
4937 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
4938 }
4939}
4940
4941static void
4942xlate_output_reg_action(struct xlate_ctx *ctx,
feee58b9
AZ
4943 const struct ofpact_output_reg *or,
4944 bool is_last_action)
9583bc14
EJ
4945{
4946 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
4947 if (port <= UINT16_MAX) {
2d9b49dd
BP
4948 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
4949
9583bc14
EJ
4950 union mf_subvalue value;
4951
4952 memset(&value, 0xff, sizeof value);
49a73e0c 4953 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
feee58b9
AZ
4954 xlate_output_action(ctx, u16_to_ofp(port), or->max_len, false,
4955 is_last_action);
2d9b49dd
BP
4956 } else {
4957 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
4958 port);
9583bc14
EJ
4959 }
4960}
4961
aaca4fe0
WT
4962static void
4963xlate_output_trunc_action(struct xlate_ctx *ctx,
feee58b9
AZ
4964 ofp_port_t port, uint32_t max_len,
4965 bool is_last_action)
aaca4fe0
WT
4966{
4967 bool support_trunc = ctx->xbridge->support.trunc;
4968 struct ovs_action_trunc *trunc;
2f2b904f 4969 char name[OFP10_MAX_PORT_NAME_LEN];
aaca4fe0
WT
4970
4971 switch (port) {
4972 case OFPP_TABLE:
4973 case OFPP_NORMAL:
4974 case OFPP_FLOOD:
4975 case OFPP_ALL:
4976 case OFPP_CONTROLLER:
4977 case OFPP_NONE:
50f96b10 4978 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
4979 xlate_report(ctx, OFT_WARN,
4980 "output_trunc does not support port: %s", name);
aaca4fe0
WT
4981 break;
4982 case OFPP_LOCAL:
4983 case OFPP_IN_PORT:
4984 default:
4985 if (port != ctx->xin->flow.in_port.ofp_port) {
4986 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
4987
4988 if (xport == NULL || xport->odp_port == ODPP_NONE) {
4989 /* Since truncation happens at the following output action, the
4990 * behavior is somewhat unpredictable if the output port is a
49f17344 4991 * patch port. For simplicity, disallow this case. */
50f96b10 4992 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
4993 xlate_report_error(ctx, "output_trunc does not support "
4994 "patch port %s", name);
aaca4fe0
WT
4995 break;
4996 }
4997
4998 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4999 OVS_ACTION_ATTR_TRUNC,
5000 sizeof *trunc);
5001 trunc->max_len = max_len;
feee58b9 5002 xlate_output_action(ctx, port, max_len, false, is_last_action);
aaca4fe0
WT
5003 if (!support_trunc) {
5004 ctx->xout->slow |= SLOW_ACTION;
5005 }
5006 } else {
2d9b49dd 5007 xlate_report(ctx, OFT_WARN, "skipping output to input port");
aaca4fe0
WT
5008 }
5009 break;
5010 }
5011}
5012
9583bc14
EJ
5013static void
5014xlate_enqueue_action(struct xlate_ctx *ctx,
feee58b9
AZ
5015 const struct ofpact_enqueue *enqueue,
5016 bool is_last_action)
9583bc14 5017{
4e022ec0 5018 ofp_port_t ofp_port = enqueue->port;
9583bc14
EJ
5019 uint32_t queue_id = enqueue->queue;
5020 uint32_t flow_priority, priority;
5021 int error;
5022
5023 /* Translate queue to priority. */
89a8a7f0 5024 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
9583bc14
EJ
5025 if (error) {
5026 /* Fall back to ordinary output action. */
feee58b9 5027 xlate_output_action(ctx, enqueue->port, 0, false, is_last_action);
9583bc14
EJ
5028 return;
5029 }
5030
5031 /* Check output port. */
5032 if (ofp_port == OFPP_IN_PORT) {
4e022ec0
AW
5033 ofp_port = ctx->xin->flow.in_port.ofp_port;
5034 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
5035 return;
5036 }
5037
5038 /* Add datapath actions. */
5039 flow_priority = ctx->xin->flow.skb_priority;
5040 ctx->xin->flow.skb_priority = priority;
feee58b9 5041 compose_output_action(ctx, ofp_port, NULL, is_last_action);
9583bc14
EJ
5042 ctx->xin->flow.skb_priority = flow_priority;
5043
5044 /* Update NetFlow output port. */
2031ef97
BP
5045 if (ctx->nf_output_iface == NF_OUT_DROP) {
5046 ctx->nf_output_iface = ofp_port;
5047 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
5048 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
5049 }
5050}
5051
5052static void
5053xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
5054{
5055 uint32_t skb_priority;
5056
89a8a7f0 5057 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
9583bc14
EJ
5058 ctx->xin->flow.skb_priority = skb_priority;
5059 } else {
5060 /* Couldn't translate queue to a priority. Nothing to do. A warning
5061 * has already been logged. */
5062 }
5063}
5064
5065static bool
46c88433 5066slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
9583bc14 5067{
46c88433
EJ
5068 const struct xbridge *xbridge = xbridge_;
5069 struct xport *port;
9583bc14
EJ
5070
5071 switch (ofp_port) {
5072 case OFPP_IN_PORT:
5073 case OFPP_TABLE:
5074 case OFPP_NORMAL:
5075 case OFPP_FLOOD:
5076 case OFPP_ALL:
5077 case OFPP_NONE:
5078 return true;
5079 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5080 return false;
5081 default:
46c88433 5082 port = get_ofp_port(xbridge, ofp_port);
9583bc14
EJ
5083 return port ? port->may_enable : false;
5084 }
5085}
5086
5087static void
5088xlate_bundle_action(struct xlate_ctx *ctx,
feee58b9
AZ
5089 const struct ofpact_bundle *bundle,
5090 bool is_last_action)
9583bc14 5091{
4e022ec0 5092 ofp_port_t port;
9583bc14 5093
49a73e0c 5094 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
46c88433 5095 CONST_CAST(struct xbridge *, ctx->xbridge));
9583bc14 5096 if (bundle->dst.field) {
49a73e0c 5097 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
2d9b49dd 5098 xlate_report_subfield(ctx, &bundle->dst);
9583bc14 5099 } else {
feee58b9 5100 xlate_output_action(ctx, port, 0, false, is_last_action);
9583bc14
EJ
5101 }
5102}
5103
4165b5e0
JS
5104static void
5105xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5106{
49a73e0c 5107 learn_mask(learn, ctx->wc);
9583bc14 5108
df70a773 5109 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4165b5e0
JS
5110 uint64_t ofpacts_stub[1024 / 8];
5111 struct ofputil_flow_mod fm;
2c7ee524 5112 struct ofproto_flow_mod ofm__, *ofm;
4165b5e0 5113 struct ofpbuf ofpacts;
2c7ee524
JR
5114 enum ofperr error;
5115
5116 if (ctx->xin->xcache) {
3f3b97b0 5117 ofm = xmalloc(sizeof *ofm);
2c7ee524
JR
5118 } else {
5119 ofm = &ofm__;
5120 }
4165b5e0
JS
5121
5122 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
2c7ee524 5123 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
2d9b49dd
BP
5124 if (OVS_UNLIKELY(ctx->xin->trace)) {
5125 struct ds s = DS_EMPTY_INITIALIZER;
5126 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
50f96b10 5127 match_format(&fm.match, NULL, &s, OFP_DEFAULT_PRIORITY);
2d9b49dd
BP
5128 ds_chomp(&s, ' ');
5129 ds_put_format(&s, " priority=%d", fm.priority);
5130 if (fm.new_cookie) {
5131 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5132 }
5133 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5134 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5135 }
5136 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5137 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5138 }
5139 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5140 ds_put_cstr(&s, " send_flow_rem");
5141 }
5142 ds_put_cstr(&s, " actions=");
50f96b10 5143 ofpacts_format(fm.ofpacts, fm.ofpacts_len, NULL, &s);
2d9b49dd
BP
5144 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5145 ds_destroy(&s);
5146 }
2c7ee524
JR
5147 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5148 &fm, ofm);
4165b5e0 5149 ofpbuf_uninit(&ofpacts);
2c7ee524 5150
3f3b97b0 5151 if (!error) {
4c71600d 5152 bool success = true;
3f3b97b0 5153 if (ctx->xin->allow_side_effects) {
4c71600d
DDP
5154 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5155 learn->limit, &success);
5156 } else if (learn->limit) {
5157 if (!ofm->temp_rule
5158 || ofm->temp_rule->state != RULE_INSERTED) {
5159 /* The learned rule expired and there are no packets, so
5160 * we cannot learn again. Since the translated actions
5161 * depend on the result of learning, we tell the caller
5162 * that there's no point in caching this result. */
5163 ctx->xout->avoid_caching = true;
5164 }
3f3b97b0
DDP
5165 }
5166
4c71600d
DDP
5167 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5168 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5169 &ctx->xin->flow, ctx->wc);
5170 xlate_report_subfield(ctx, &learn->result_dst);
5171 }
5172
5173 if (success && ctx->xin->xcache) {
3f3b97b0
DDP
5174 struct xc_entry *entry;
5175
5176 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5177 entry->learn.ofm = ofm;
4c71600d 5178 entry->learn.limit = learn->limit;
3f3b97b0
DDP
5179 ofm = NULL;
5180 }
4c71600d
DDP
5181
5182 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5183 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5184 }
3f3b97b0
DDP
5185 }
5186
5187 if (ctx->xin->xcache) {
5188 free(ofm);
2c7ee524
JR
5189 }
5190
5191 if (error) {
2d9b49dd
BP
5192 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5193 ofperr_to_string(error));
2c7ee524 5194 }
2d9b49dd
BP
5195 } else {
5196 xlate_report(ctx, OFT_WARN,
5197 "suppressing side effects, so learn action ignored");
b256dc52
JS
5198 }
5199}
5200
5201static void
5202xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5203 uint16_t idle_timeout, uint16_t hard_timeout)
5204{
5205 if (tcp_flags & (TCP_FIN | TCP_RST)) {
07a3cd5c 5206 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
b256dc52 5207 }
9583bc14
EJ
5208}
5209
9583bc14
EJ
5210static void
5211xlate_fin_timeout(struct xlate_ctx *ctx,
5212 const struct ofpact_fin_timeout *oft)
5213{
b256dc52 5214 if (ctx->rule) {
df70a773
JR
5215 if (ctx->xin->allow_side_effects) {
5216 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5217 oft->fin_idle_timeout, oft->fin_hard_timeout);
5218 }
b256dc52
JS
5219 if (ctx->xin->xcache) {
5220 struct xc_entry *entry;
5221
5222 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
83709dfa
JR
5223 /* XC_RULE already holds a reference on the rule, none is taken
5224 * here. */
901a517e
JR
5225 entry->fin.rule = ctx->rule;
5226 entry->fin.idle = oft->fin_idle_timeout;
5227 entry->fin.hard = oft->fin_hard_timeout;
b256dc52 5228 }
9583bc14
EJ
5229 }
5230}
5231
5232static void
5233xlate_sample_action(struct xlate_ctx *ctx,
5234 const struct ofpact_sample *os)
5235{
f69f713b
BY
5236 odp_port_t output_odp_port = ODPP_NONE;
5237 odp_port_t tunnel_out_port = ODPP_NONE;
5238 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5239 bool emit_set_tunnel = false;
5240
5241 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5242 return;
5243 }
5244
e824d78d
JR
5245 /* Scale the probability from 16-bit to 32-bit while representing
5246 * the same percentage. */
5247 uint32_t probability = (os->probability << 16) | os->probability;
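 /* Worked example (illustrative): a 16-bit probability of 0x8000 (50%)
 * becomes 0x80008000, the same fraction of UINT32_MAX, since both the
 * value and the range are scaled by the factor 0x10001. */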
5248
b440dd8c 5249 if (!ctx->xbridge->support.variable_length_userdata) {
2d9b49dd
BP
5250 xlate_report_error(ctx, "ignoring NXAST_SAMPLE action because "
5251 "datapath lacks support (needs Linux 3.10+ or "
5252 "kernel module from OVS 1.11+)");
e824d78d
JR
5253 return;
5254 }
5255
f69f713b
BY
5256 /* If the sampling port in the flow sample action is equal to the
5257 * input port, this sample action is an input port action. */
5258 if (os->sampling_port != OFPP_NONE &&
5259 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5260 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5261 os->sampling_port);
5262 if (output_odp_port == ODPP_NONE) {
2d9b49dd
BP
5263 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5264 "action", os->sampling_port);
f69f713b
BY
5265 return;
5266 }
5267
5268 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5269 os->collector_set_id)
5270 && dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
5271 tunnel_out_port = output_odp_port;
5272 emit_set_tunnel = true;
5273 }
5274 }
5275
5276 xlate_commit_actions(ctx);
5277 /* If 'emit_set_tunnel', sample(sampling_port=1) would translate
5278 * into datapath sample action set(tunnel(...)), sample(...) and
5279 * it is used for sampling egress tunnel information. */
5280 if (emit_set_tunnel) {
5281 const struct xport *xport = get_ofp_port(ctx->xbridge,
5282 os->sampling_port);
5283
5284 if (xport && xport->is_tunnel) {
5285 struct flow *flow = &ctx->xin->flow;
5286 tnl_port_send(xport->ofport, flow, ctx->wc);
5287 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5288 struct flow_tnl flow_tnl = flow->tunnel;
5289
5290 commit_odp_tunnel_action(flow, &ctx->base_flow,
5291 ctx->odp_actions);
5292 flow->tunnel = flow_tnl;
5293 }
5294 } else {
2d9b49dd
BP
5295 xlate_report_error(ctx,
5296 "sampling_port:%d should be a tunnel port.",
5297 os->sampling_port);
f69f713b
BY
5298 }
5299 }
e824d78d 5300
a6092018
BP
5301 union user_action_cookie cookie = {
5302 .flow_sample = {
5303 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
5304 .probability = os->probability,
5305 .collector_set_id = os->collector_set_id,
5306 .obs_domain_id = os->obs_domain_id,
5307 .obs_point_id = os->obs_point_id,
f69f713b 5308 .output_odp_port = output_odp_port,
4930ea56 5309 .direction = os->direction,
a6092018
BP
5310 }
5311 };
5312 compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
f69f713b 5313 tunnel_out_port, false);
9583bc14
EJ
5314}
5315
eee69393
AZ
5316/* Determines whether a datapath action translated from an OpenFlow action
5317 * can be reversed by another datapath action.
5318 *
5319 * OpenFlow actions that do not emit datapath actions are trivially
5320 * reversible. Reversibility of other actions depends on the nature of
5321 * each action and its translation. */
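/* For example (illustrative): register moves and resubmits only alter the
 * flow or trigger further lookups and can be undone by restoring the saved
 * flow, whereas ct, meter, nat, output_trunc, encap and decap (the cases
 * returning false below) emit datapath actions whose side effects cannot
 * be taken back. */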
5322static bool
5323reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
bef503e8 5324{
eee69393 5325 const struct ofpact *a;
bef503e8 5326
eee69393
AZ
5327 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5328 switch (a->type) {
5329 case OFPACT_BUNDLE:
5330 case OFPACT_CLEAR_ACTIONS:
5331 case OFPACT_CLONE:
5332 case OFPACT_CONJUNCTION:
5333 case OFPACT_CONTROLLER:
5334 case OFPACT_CT_CLEAR:
5335 case OFPACT_DEBUG_RECIRC:
5336 case OFPACT_DEC_MPLS_TTL:
5337 case OFPACT_DEC_TTL:
5338 case OFPACT_ENQUEUE:
5339 case OFPACT_EXIT:
5340 case OFPACT_FIN_TIMEOUT:
5341 case OFPACT_GOTO_TABLE:
5342 case OFPACT_GROUP:
5343 case OFPACT_LEARN:
5344 case OFPACT_MULTIPATH:
5345 case OFPACT_NOTE:
5346 case OFPACT_OUTPUT:
5347 case OFPACT_OUTPUT_REG:
5348 case OFPACT_POP_MPLS:
5349 case OFPACT_POP_QUEUE:
5350 case OFPACT_PUSH_MPLS:
5351 case OFPACT_PUSH_VLAN:
5352 case OFPACT_REG_MOVE:
5353 case OFPACT_RESUBMIT:
5354 case OFPACT_SAMPLE:
5355 case OFPACT_SET_ETH_DST:
5356 case OFPACT_SET_ETH_SRC:
5357 case OFPACT_SET_FIELD:
5358 case OFPACT_SET_IP_DSCP:
5359 case OFPACT_SET_IP_ECN:
5360 case OFPACT_SET_IP_TTL:
5361 case OFPACT_SET_IPV4_DST:
5362 case OFPACT_SET_IPV4_SRC:
5363 case OFPACT_SET_L4_DST_PORT:
5364 case OFPACT_SET_L4_SRC_PORT:
5365 case OFPACT_SET_MPLS_LABEL:
5366 case OFPACT_SET_MPLS_TC:
5367 case OFPACT_SET_MPLS_TTL:
5368 case OFPACT_SET_QUEUE:
5369 case OFPACT_SET_TUNNEL:
5370 case OFPACT_SET_VLAN_PCP:
5371 case OFPACT_SET_VLAN_VID:
5372 case OFPACT_STACK_POP:
5373 case OFPACT_STACK_PUSH:
5374 case OFPACT_STRIP_VLAN:
5375 case OFPACT_UNROLL_XLATE:
5376 case OFPACT_WRITE_ACTIONS:
5377 case OFPACT_WRITE_METADATA:
5378 break;
5379
5380 case OFPACT_CT:
5381 case OFPACT_METER:
5382 case OFPACT_NAT:
5383 case OFPACT_OUTPUT_TRUNC:
2142be1f
BP
5384 case OFPACT_ENCAP:
5385 case OFPACT_DECAP:
eee69393 5386 return false;
9c2a44dc 5387 }
456024cb 5388 }
eee69393 5389 return true;
bef503e8
AZ
5390}
5391
5392static void
c9f0a445 5393clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
feee58b9 5394 struct xlate_ctx *ctx, bool is_last_action)
7ae62a67 5395{
b827b231
BP
5396 struct ofpbuf old_stack = ctx->stack;
5397 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5398 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5399 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5400
5401 struct ofpbuf old_action_set = ctx->action_set;
5402 uint64_t actset_stub[1024 / 8];
5403 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5404 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5405
eee69393 5406 size_t offset, ac_offset;
eee69393
AZ
5407 struct flow old_flow = ctx->xin->flow;
5408
feee58b9 5409 if (reversible_actions(actions, actions_len) || is_last_action) {
eee69393 5410 old_flow = ctx->xin->flow;
feee58b9 5411 do_xlate_actions(actions, actions_len, ctx, is_last_action);
60eebf12
AZ
5412 if (ctx->freezing) {
5413 finish_freezing(ctx);
5414 }
eee69393
AZ
5415 goto xlate_done;
5416 }
5417
5418 /* Commit datapath actions before emitting the clone action to
5419 * avoid emitting those actions twice: once inside the clone and
5420 * another time for the actions after the clone. */
5421 xlate_commit_actions(ctx);
9c2a44dc 5422 struct flow old_base = ctx->base_flow;
eee69393
AZ
5423 bool old_was_mpls = ctx->was_mpls;
5424 bool old_conntracked = ctx->conntracked;
ba653d2a 5425
eee69393
AZ
5426 /* The actions are not reversible, so a datapath clone action is
5427 * required to encode the translation. Select the clone action
5428 * based on datapath capabilities. */
5429 if (ctx->xbridge->support.clone) {
5430 /* Use the OVS_ACTION_ATTR_CLONE action as the datapath clone. */
5431 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
feee58b9 5432 do_xlate_actions(actions, actions_len, ctx, true);
60eebf12
AZ
5433 if (ctx->freezing) {
5434 finish_freezing(ctx);
5435 }
eee69393
AZ
5436 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5437 goto dp_clone_done;
5438 }
b827b231 5439
eee69393
AZ
5440 if (ctx->xbridge->support.sample_nesting > 3) {
5441 /* Use sample action as datapath clone. */
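 /* Illustrative note: a sample action whose probability is UINT32_MAX
 * (100%) always executes its nested action list exactly once, so it
 * behaves like a clone of those actions. */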
5442 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5443 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5444 OVS_SAMPLE_ATTR_ACTIONS);
feee58b9 5445 do_xlate_actions(actions, actions_len, ctx, true);
60eebf12
AZ
5446 if (ctx->freezing) {
5447 finish_freezing(ctx);
5448 }
eee69393
AZ
5449 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5450 nl_msg_cancel_nested(ctx->odp_actions, offset);
5451 } else {
5452 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5453 UINT32_MAX); /* 100% probability. */
5454 nl_msg_end_nested(ctx->odp_actions, offset);
5455 }
5456 goto dp_clone_done;
5457 }
5458
5459 /* The datapath supports neither clone nor sample-based cloning; skip
5460 * translating the nested actions and report an error. */
5461 xlate_report_error(ctx, "Failed to compose clone action");
ba653d2a 5462
eee69393 5463dp_clone_done:
ba653d2a
BP
5464 /* The clone's conntrack execution should have no effect on the original
5465 * packet. */
5466 ctx->conntracked = old_conntracked;
bd3c2df3
BP
5467
5468 /* Popping MPLS from the clone should have no effect on the original
5469 * packet. */
5470 ctx->was_mpls = old_was_mpls;
eee69393
AZ
5471
5472 /* Restore the 'base_flow' for the next action. */
5473 ctx->base_flow = old_base;
5474
5475xlate_done:
5476 ofpbuf_uninit(&ctx->action_set);
5477 ctx->action_set = old_action_set;
5478 ofpbuf_uninit(&ctx->stack);
5479 ctx->stack = old_stack;
5480 ctx->xin->flow = old_flow;
7ae62a67
WT
5481}
5482
c9f0a445 5483static void
feee58b9
AZ
5484compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5485 bool is_last_action)
c9f0a445
AZ
5486{
5487 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5488
feee58b9 5489 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action);
c9f0a445
AZ
5490}
5491
076caa2f
JR
5492static void
5493xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5494{
5495 if (meter->provider_meter_id != UINT32_MAX) {
5496 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5497 meter->provider_meter_id);
5498 }
5499}
5500
9583bc14 5501static bool
46c88433 5502may_receive(const struct xport *xport, struct xlate_ctx *ctx)
9583bc14 5503{
bbbca389 5504 if (xport->config & (is_stp(&ctx->xin->flow)
46c88433
EJ
5505 ? OFPUTIL_PC_NO_RECV_STP
5506 : OFPUTIL_PC_NO_RECV)) {
9583bc14
EJ
5507 return false;
5508 }
5509
5510 /* Only drop packets here if both forwarding and learning are
5511 * disabled. If just learning is enabled, we need to have
5512 * OFPP_NORMAL and the learning action have a look at the packet
5513 * before we can drop it. */
9efd308e
DV
5514 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5515 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
9583bc14
EJ
5516 return false;
5517 }
5518
5519 return true;
5520}
5521
7fdb60a7 5522static void
7e7e8dbb
BP
5523xlate_write_actions__(struct xlate_ctx *ctx,
5524 const struct ofpact *ofpacts, size_t ofpacts_len)
7fdb60a7 5525{
c61f3870
BP
5526 /* Maintain actset_output depending on the contents of the action set:
5527 *
5528 * - OFPP_UNSET, if there is no "output" action.
5529 *
5530 * - The output port, if there is an "output" action and no "group"
5531 * action.
5532 *
5533 * - OFPP_UNSET, if there is a "group" action.
5534 */
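 /* Illustrative example: an action set containing "output:1" yields
 * actset_output == 1; adding "group:5" to it resets actset_output to
 * OFPP_UNSET. */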
5535 if (!ctx->action_set_has_group) {
7e7e8dbb
BP
5536 const struct ofpact *a;
5537 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5538 if (a->type == OFPACT_OUTPUT) {
5539 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5540 } else if (a->type == OFPACT_GROUP) {
c61f3870
BP
5541 ctx->xin->flow.actset_output = OFPP_UNSET;
5542 ctx->action_set_has_group = true;
9055ca9a 5543 break;
c61f3870
BP
5544 }
5545 }
5546 }
5547
7e7e8dbb
BP
5548 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5549}
5550
5551static void
5552xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5553{
5554 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
7fdb60a7
SH
5555}
5556
5557static void
5558xlate_action_set(struct xlate_ctx *ctx)
5559{
2d9b49dd
BP
5560 uint64_t action_list_stub[1024 / 8];
5561 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
7fdb60a7 5562 ofpacts_execute_action_set(&action_list, &ctx->action_set);
ed9c9e3e
JR
5563 /* Clear the action set, as it is not needed any more. */
5564 ofpbuf_clear(&ctx->action_set);
2d9b49dd
BP
5565 if (action_list.size) {
5566 ctx->in_action_set = true;
5567
5568 struct ovs_list *old_trace = ctx->xin->trace;
5569 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5570 "--. Executing action set:");
feee58b9 5571 do_xlate_actions(action_list.data, action_list.size, ctx, true);
2d9b49dd
BP
5572 ctx->xin->trace = old_trace;
5573
5574 ctx->in_action_set = false;
5575 }
7fdb60a7
SH
5576 ofpbuf_uninit(&action_list);
5577}
5578
e672ff9b 5579static void
1d361a81 5580freeze_put_unroll_xlate(struct xlate_ctx *ctx)
e672ff9b 5581{
1d361a81 5582 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
e672ff9b
JR
5583
5584 /* Restore the table_id and rule cookie for a potential PACKET
5585 * IN if needed. */
5586 if (!unroll ||
5587 (ctx->table_id != unroll->rule_table_id
5588 || ctx->rule_cookie != unroll->rule_cookie)) {
1d361a81 5589 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
e672ff9b
JR
5590 unroll->rule_table_id = ctx->table_id;
5591 unroll->rule_cookie = ctx->rule_cookie;
1d361a81 5592 ctx->frozen_actions.header = unroll;
e672ff9b
JR
5593 }
5594}
5595
5596
1d361a81
BP
5597/* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5598 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5599 * present, before any action that may depend on the current table ID or flow
5600 * cookie. */
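/* Illustrative example: freezing in table 3 with remaining actions
 * "controller, output:2" stores "unroll_xlate(table=3, cookie=...),
 * controller, output:2", so a PACKET_IN generated after thawing still
 * reports table 3 and the original rule's cookie. */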
e672ff9b 5601static void
1d361a81 5602freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
e672ff9b
JR
5603 struct xlate_ctx *ctx)
5604{
c2b283b7 5605 for (; a < end; a = ofpact_next(a)) {
e672ff9b 5606 switch (a->type) {
e672ff9b 5607 case OFPACT_OUTPUT_REG:
aaca4fe0 5608 case OFPACT_OUTPUT_TRUNC:
e672ff9b
JR
5609 case OFPACT_GROUP:
5610 case OFPACT_OUTPUT:
5611 case OFPACT_CONTROLLER:
5612 case OFPACT_DEC_MPLS_TTL:
5613 case OFPACT_DEC_TTL:
83a31283
BP
5614 /* These actions may generate asynchronous messages, which include
5615 * table ID and flow cookie information. */
1d361a81 5616 freeze_put_unroll_xlate(ctx);
e672ff9b
JR
5617 break;
5618
83a31283
BP
5619 case OFPACT_RESUBMIT:
5620 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5621 /* This resubmit action is relative to the current table, so we
5622 * need to track what table that is. */
1d361a81 5623 freeze_put_unroll_xlate(ctx);
83a31283
BP
5624 }
5625 break;
5626
e672ff9b
JR
5627 case OFPACT_SET_TUNNEL:
5628 case OFPACT_REG_MOVE:
5629 case OFPACT_SET_FIELD:
5630 case OFPACT_STACK_PUSH:
5631 case OFPACT_STACK_POP:
5632 case OFPACT_LEARN:
5633 case OFPACT_WRITE_METADATA:
83a31283 5634 case OFPACT_GOTO_TABLE:
e672ff9b
JR
5635 case OFPACT_ENQUEUE:
5636 case OFPACT_SET_VLAN_VID:
5637 case OFPACT_SET_VLAN_PCP:
5638 case OFPACT_STRIP_VLAN:
5639 case OFPACT_PUSH_VLAN:
5640 case OFPACT_SET_ETH_SRC:
5641 case OFPACT_SET_ETH_DST:
5642 case OFPACT_SET_IPV4_SRC:
5643 case OFPACT_SET_IPV4_DST:
5644 case OFPACT_SET_IP_DSCP:
5645 case OFPACT_SET_IP_ECN:
5646 case OFPACT_SET_IP_TTL:
5647 case OFPACT_SET_L4_SRC_PORT:
5648 case OFPACT_SET_L4_DST_PORT:
5649 case OFPACT_SET_QUEUE:
5650 case OFPACT_POP_QUEUE:
5651 case OFPACT_PUSH_MPLS:
5652 case OFPACT_POP_MPLS:
5653 case OFPACT_SET_MPLS_LABEL:
5654 case OFPACT_SET_MPLS_TC:
5655 case OFPACT_SET_MPLS_TTL:
5656 case OFPACT_MULTIPATH:
5657 case OFPACT_BUNDLE:
5658 case OFPACT_EXIT:
5659 case OFPACT_UNROLL_XLATE:
5660 case OFPACT_FIN_TIMEOUT:
5661 case OFPACT_CLEAR_ACTIONS:
5662 case OFPACT_WRITE_ACTIONS:
5663 case OFPACT_METER:
5664 case OFPACT_SAMPLE:
7ae62a67 5665 case OFPACT_CLONE:
f839892a
JS
5666 case OFPACT_ENCAP:
5667 case OFPACT_DECAP:
d4abaff5 5668 case OFPACT_DEBUG_RECIRC:
07659514 5669 case OFPACT_CT:
72fe7578 5670 case OFPACT_CT_CLEAR:
9ac0aada 5671 case OFPACT_NAT:
83a31283 5672 /* These may not generate PACKET INs. */
e672ff9b
JR
5673 break;
5674
e672ff9b
JR
5675 case OFPACT_NOTE:
5676 case OFPACT_CONJUNCTION:
83a31283 5677 /* These need not be copied for restoration. */
e672ff9b
JR
5678 continue;
5679 }
5680 /* Copy the action over. */
1d361a81 5681 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
e672ff9b
JR
5682 }
5683}
5684
8e53fe8c 5685static void
f2d105b5
JS
5686put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5687 struct flow_wildcards *wc)
8e53fe8c 5688{
2a754f4a
JS
5689 if (wc->masks.ct_mark) {
5690 struct {
5691 uint32_t key;
5692 uint32_t mask;
5693 } *odp_ct_mark;
5694
5695 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5696 sizeof(*odp_ct_mark));
5697 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5698 odp_ct_mark->mask = wc->masks.ct_mark;
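 /* Illustrative example: if the nested ct actions set only bit 0 of
 * ct_mark, wc->masks.ct_mark is 0x1, so the datapath receives
 * key = flow->ct_mark & 0x1, mask = 0x1 and leaves all other bits of
 * the connection's mark untouched. */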
8e53fe8c
JS
5699 }
5700}
5701
9daf2348 5702static void
f2d105b5
JS
5703put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5704 struct flow_wildcards *wc)
9daf2348 5705{
2ff8484b 5706 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
9daf2348
JS
5707 struct {
5708 ovs_u128 key;
5709 ovs_u128 mask;
89cf41ec 5710 } odp_ct_label;
9daf2348 5711
89cf41ec
BP
5712 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
5713 odp_ct_label.mask = wc->masks.ct_label;
5714 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
5715 &odp_ct_label, sizeof odp_ct_label);
9daf2348
JS
5716 }
5717}
5718
d787ad39 5719static void
2d9b49dd
BP
5720put_ct_helper(struct xlate_ctx *ctx,
5721 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
d787ad39
JS
5722{
5723 if (ofc->alg) {
40c7b2fc
JS
5724 switch (ofc->alg) {
5725 case IPPORT_FTP:
d787ad39 5726 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
40c7b2fc
JS
5727 break;
5728 case IPPORT_TFTP:
5729 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5730 break;
5731 default:
2d9b49dd 5732 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
40c7b2fc 5733 break;
d787ad39
JS
5734 }
5735 }
5736}
5737
9ac0aada
JR
5738static void
5739put_ct_nat(struct xlate_ctx *ctx)
5740{
5741 struct ofpact_nat *ofn = ctx->ct_nat_action;
5742 size_t nat_offset;
5743
5744 if (!ofn) {
5745 return;
5746 }
5747
5748 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5749 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5750 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5751 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5752 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5753 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5754 }
5755 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5756 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5757 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5758 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5759 }
5760 if (ofn->range_af == AF_INET) {
73e8bc23 5761 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
9ac0aada
JR
5762 ofn->range.addr.ipv4.min);
5763 if (ofn->range.addr.ipv4.max &&
73e8bc23
BP
5764 (ntohl(ofn->range.addr.ipv4.max)
5765 > ntohl(ofn->range.addr.ipv4.min))) {
5766 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5767 ofn->range.addr.ipv4.max);
9ac0aada
JR
5768 }
5769 } else if (ofn->range_af == AF_INET6) {
5770 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5771 &ofn->range.addr.ipv6.min,
5772 sizeof ofn->range.addr.ipv6.min);
5773 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
5774 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
5775 sizeof ofn->range.addr.ipv6.max) > 0) {
5776 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5777 &ofn->range.addr.ipv6.max,
5778 sizeof ofn->range.addr.ipv6.max);
5779 }
5780 }
5781 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
5782 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
5783 ofn->range.proto.min);
5784 if (ofn->range.proto.max &&
5785 ofn->range.proto.max > ofn->range.proto.min) {
5786 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
5787 ofn->range.proto.max);
5788 }
5789 }
5790 }
5791 nl_msg_end_nested(ctx->odp_actions, nat_offset);
5792}
5793
07659514 5794static void
feee58b9
AZ
5795compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
5796 bool is_last_action)
07659514 5797{
f2d105b5 5798 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
f2d105b5 5799 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
07659514
JS
5800 size_t ct_offset;
5801 uint16_t zone;
5802
5803 /* Ensure that any prior actions are applied before composing the new
5804 * conntrack action. */
5805 xlate_commit_actions(ctx);
5806
8e53fe8c 5807 /* Process nested actions first, to populate the key. */
9ac0aada 5808 ctx->ct_nat_action = NULL;
f2d105b5 5809 ctx->wc->masks.ct_mark = 0;
f6fabcc6 5810 ctx->wc->masks.ct_label = OVS_U128_ZERO;
feee58b9
AZ
5811 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
5812 is_last_action);
8e53fe8c 5813
07659514
JS
5814 if (ofc->zone_src.field) {
5815 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
5816 } else {
5817 zone = ofc->zone_imm;
5818 }
5819
5820 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
5821 if (ofc->flags & NX_CT_F_COMMIT) {
a76a37ef
JR
5822 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
5823 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
adfe7a0b
JR
5824 if (ctx->xbridge->support.ct_eventmask) {
5825 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
975954af 5826 OVS_CT_EVENTMASK_DEFAULT);
adfe7a0b 5827 }
07659514
JS
5828 }
5829 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
f2d105b5
JS
5830 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5831 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
2d9b49dd 5832 put_ct_helper(ctx, ctx->odp_actions, ofc);
9ac0aada
JR
5833 put_ct_nat(ctx);
5834 ctx->ct_nat_action = NULL;
07659514
JS
5835 nl_msg_end_nested(ctx->odp_actions, ct_offset);
5836
f2d105b5 5837 ctx->wc->masks.ct_mark = old_ct_mark_mask;
f2d105b5 5838 ctx->wc->masks.ct_label = old_ct_label_mask;
8e53fe8c 5839
f6fabcc6 5840 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
07659514 5841 ctx->conntracked = true;
e37b8437 5842 compose_recirculate_and_fork(ctx, ofc->recirc_table);
07659514 5843 }
f6fabcc6
JP
5844
5845 /* The ct_* fields are only available in the scope of the 'recirc_table'
5846 * call chain. */
5847 flow_clear_conntrack(&ctx->xin->flow);
5848 ctx->conntracked = false;
07659514
JS
5849}
5850
f839892a
JS
5851static void
5852rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
5853 struct flow *flow,
5854 struct flow_wildcards *wc)
5855{
5856 wc->masks.packet_type = OVS_BE32_MAX;
5857 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
5858 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
5859 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
5860 flow->packet_type = htonl(PT_ETH);
5861 flow->dl_src = eth_addr_zero;
5862 flow->dl_dst = eth_addr_zero;
5863 flow->dl_type = ethertype;
5864 } else {
1fc11c59 5865 /* Error handling: drop packet. */
f839892a 5866 xlate_report_debug(ctx, OFT_ACTION,
1fc11c59
JS
5867 "Dropping packet as encap(ethernet) is not "
5868 "supported for packet type ethernet.");
f839892a
JS
5869 ctx->error = 1;
5870 }
5871}
5872
1fc11c59
JS
5873/* For an MD2 NSH header returns a pointer to an ofpbuf with the encoded
5874 * MD2 TLVs provided as encap properties to the encap operation. This
5875 * will be stored as encap_data in the ctx and copied into the encap_nsh
5876 * action at the next commit. */
5877static struct ofpbuf *
5878rewrite_flow_encap_nsh(struct xlate_ctx *ctx,
5879 const struct ofpact_encap *encap,
5880 struct flow *flow,
5881 struct flow_wildcards *wc)
5882{
5883 ovs_be32 packet_type = flow->packet_type;
5884 const char *ptr = (char *) encap->props;
5885 struct ofpbuf *buf = ofpbuf_new(OVS_ENCAP_NSH_MAX_MD_LEN);
5886 uint8_t md_type = NSH_M_TYPE1;
5887 uint8_t np = 0;
5888 int i;
5889
5890 /* Scan the optional NSH encap TLV properties, if any. */
5891 for (i = 0; i < encap->n_props; i++) {
5892 struct ofpact_ed_prop *prop_ptr =
5893 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
5894 if (prop_ptr->prop_class == OFPPPC_NSH) {
5895 switch (prop_ptr->type) {
5896 case OFPPPT_PROP_NSH_MDTYPE: {
5897 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
5898 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
5899 prop_ptr);
5900 md_type = prop_md_type->md_type;
5901 break;
5902 }
5903 case OFPPPT_PROP_NSH_TLV: {
5904 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
5905 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
5906 prop_ptr);
5907 struct nsh_md2_tlv *md2_ctx =
5908 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
5909 md2_ctx->md_class = tlv_prop->tlv_class;
5910 md2_ctx->type = tlv_prop->tlv_type;
5911 md2_ctx->length = tlv_prop->tlv_len;
5912 size_t len = ROUND_UP(md2_ctx->length, 4);
5913 size_t padding = len - md2_ctx->length;
5914 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
5915 ofpbuf_put_zeros(buf, padding);
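 /* Illustrative example: a 5-byte TLV body is padded with three zero
 * bytes (ROUND_UP(5, 4) == 8) so each MD2 TLV stays 4-byte aligned. */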
5916 break;
5917 }
5918 default:
5919 /* No other NSH encap properties defined yet. */
5920 break;
5921 }
5922 }
5923 ptr += ROUND_UP(prop_ptr->len, 8);
5924 }
5925 if (buf->size == 0 || buf->size > OVS_ENCAP_NSH_MAX_MD_LEN) {
5926 ofpbuf_delete(buf);
5927 buf = NULL;
5928 }
5929
5930 /* Determine the Next Protocol field for the NSH header. */
5931 switch (ntohl(packet_type)) {
5932 case PT_ETH:
5933 np = NSH_P_ETHERNET;
5934 break;
5935 case PT_IPV4:
5936 np = NSH_P_IPV4;
5937 break;
5938 case PT_IPV6:
5939 np = NSH_P_IPV6;
5940 break;
5941 case PT_NSH:
5942 np = NSH_P_NSH;
5943 break;
5944 default:
5945 /* Error handling: drop packet. */
5946 xlate_report_debug(ctx, OFT_ACTION,
5947 "Dropping packet as encap(nsh) is not "
5948 "supported for packet type (%d,0x%x)",
5949 pt_ns(packet_type), pt_ns_type(packet_type));
5950 ctx->error = 1;
5951 return buf;
5952 }
5953 /* Note that we have matched on packet_type! */
5954 wc->masks.packet_type = OVS_BE32_MAX;
5955
5956 /* Reset all current flow packet headers. */
5957 memset(&flow->dl_dst, 0,
5958 sizeof(struct flow) - offsetof(struct flow, dl_dst));
5959
5960 /* Populate the flow with the new NSH header. */
5961 flow->packet_type = htonl(PT_NSH);
5962 flow->dl_type = htons(ETH_TYPE_NSH);
5963 flow->nsh.flags = 0;
5964 flow->nsh.np = np;
5965 flow->nsh.spi = 0;
5966 flow->nsh.si = 255;
5967
5968 if (md_type == NSH_M_TYPE1) {
5969 flow->nsh.mdtype = NSH_M_TYPE1;
5970 memset(flow->nsh.c, 0, sizeof flow->nsh.c);
5971 if (buf) {
5972 /* Drop any MD2 context TLVs. */
5973 ofpbuf_delete(buf);
5974 buf = NULL;
5975 }
5976 } else if (md_type == NSH_M_TYPE2) {
5977 flow->nsh.mdtype = NSH_M_TYPE2;
5978 }
5979
5980 return buf;
5981}
5982
f839892a
JS
5983static void
5984xlate_generic_encap_action(struct xlate_ctx *ctx,
5985 const struct ofpact_encap *encap)
5986{
5987 struct flow *flow = &ctx->xin->flow;
5988 struct flow_wildcards *wc = ctx->wc;
1fc11c59 5989 struct ofpbuf *encap_data = NULL;
f839892a
JS
5990
5991 /* Ensure that any pending actions on the inner packet are applied before
5992 * rewriting the flow. */
5993 xlate_commit_actions(ctx);
5994
5995 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
5996 switch (ntohl(encap->new_pkt_type)) {
5997 case PT_ETH:
5998 rewrite_flow_encap_ethernet(ctx, flow, wc);
5999 break;
1fc11c59
JS
6000 case PT_NSH:
6001 encap_data = rewrite_flow_encap_nsh(ctx, encap, flow, wc);
6002 break;
f839892a 6003 default:
1fc11c59
JS
6004 /* New packet type was checked during decoding. */
6005 OVS_NOT_REACHED();
f839892a
JS
6006 break;
6007 }
6008
6009 if (!ctx->error) {
6010 /* The actual encap datapath action will be generated at next commit. */
6011 ctx->pending_encap = true;
1fc11c59 6012 ctx->encap_data = encap_data;
f839892a
JS
6013 }
6014}
6015
6016/* Returns true if packet must be recirculated after decapsulation. */
6017static bool
6018xlate_generic_decap_action(struct xlate_ctx *ctx,
6019 const struct ofpact_decap *decap OVS_UNUSED)
6020{
6021 struct flow *flow = &ctx->xin->flow;
6022
6023 /* Ensure that any pending actions on the current packet are applied
6024 * before generating the decap action. */
6025 xlate_commit_actions(ctx);
6026
6027 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
6028 switch (ntohl(flow->packet_type)) {
6029 case PT_ETH:
6030 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
6031 /* Error handling: drop packet. */
6032 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
6033 "decap Ethernet if VLAN is present.");
6034 ctx->error = 1;
6035 } else {
6036 /* Just change the packet_type.
6037 * Delay generating pop_eth to the next commit. */
6038 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
6039 ntohs(flow->dl_type)));
6040 ctx->wc->masks.dl_type = OVS_BE16_MAX;
6041 }
6042 return false;
1fc11c59
JS
6043 case PT_NSH:
6044 /* The decap_nsh action is generated at the commit executed as
6045 * part of freezing the ctx for recirculation. Here we just set
6046 * the new packet type based on the NSH next protocol field. */
6047 switch (flow->nsh.np) {
6048 case NSH_P_ETHERNET:
6049 flow->packet_type = htonl(PT_ETH);
6050 break;
6051 case NSH_P_IPV4:
6052 flow->packet_type = htonl(PT_IPV4);
6053 break;
6054 case NSH_P_IPV6:
6055 flow->packet_type = htonl(PT_IPV6);
6056 break;
6057 case NSH_P_NSH:
6058 flow->packet_type = htonl(PT_NSH);
6059 break;
6060 default:
6061 /* Error handling: drop packet. */
6062 xlate_report_debug(ctx, OFT_ACTION,
6063 "Dropping packet as NSH next protocol %d "
6064 "is not supported", flow->nsh.np);
6065 ctx->error = 1;
6066 return false;
6067 break;
6068 }
6069 ctx->wc->masks.nsh.np = UINT8_MAX;
6070 /* Trigger recirculation. */
6071 return true;
f839892a 6072 default:
1fc11c59
JS
6073 /* Error handling: drop packet. */
6074 xlate_report_debug(
6075 ctx, OFT_ACTION,
6076 "Dropping packet as the decap() does not support "
6077 "packet type (%d,0x%x)",
6078 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
f839892a
JS
6079 ctx->error = 1;
6080 return false;
6081 }
6082}
6083
e12ec36b
SH
6084static void
6085recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
6086{
6087 /* No need to recirculate if already exiting. */
6088 if (ctx->exit) {
6089 return;
6090 }
6091
6092 /* Do not consider recirculating unless the packet was previously MPLS. */
6093 if (!ctx->was_mpls) {
6094 return;
6095 }
6096
6097 /* Special case these actions, only recirculating if necessary.
6098 * This avoids the overhead of recirculation in common use-cases.
6099 */
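 /* Illustrative example: "pop_mpls:0x0800, output:1" needs no
 * recirculation because output does not reparse the packet, but
 * "pop_mpls:0x0800" followed by an IP set-field (e.g.
 * "set_field:10.0.0.1->ip_src") must recirculate so the datapath can
 * re-extract the L3 headers. */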
6100 switch (a->type) {
6101
6102 /* Output actions do not require recirculation. */
6103 case OFPACT_OUTPUT:
aaca4fe0 6104 case OFPACT_OUTPUT_TRUNC:
e12ec36b
SH
6105 case OFPACT_ENQUEUE:
6106 case OFPACT_OUTPUT_REG:
6107 /* Set actions that don't touch L3+ fields do not require recirculation. */
6108 case OFPACT_SET_VLAN_VID:
6109 case OFPACT_SET_VLAN_PCP:
6110 case OFPACT_SET_ETH_SRC:
6111 case OFPACT_SET_ETH_DST:
6112 case OFPACT_SET_TUNNEL:
6113 case OFPACT_SET_QUEUE:
6114 /* If actions of a group require recirculation that can be detected
6115 * when translating them. */
6116 case OFPACT_GROUP:
6117 return;
6118
6119 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
6120 case OFPACT_SET_FIELD:
6121 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6122 break;
6123 }
6124 return;
6125
6126 /* For simplicity, recirculate in all other cases. */
6127 case OFPACT_CONTROLLER:
6128 case OFPACT_BUNDLE:
6129 case OFPACT_STRIP_VLAN:
6130 case OFPACT_PUSH_VLAN:
6131 case OFPACT_SET_IPV4_SRC:
6132 case OFPACT_SET_IPV4_DST:
6133 case OFPACT_SET_IP_DSCP:
6134 case OFPACT_SET_IP_ECN:
6135 case OFPACT_SET_IP_TTL:
6136 case OFPACT_SET_L4_SRC_PORT:
6137 case OFPACT_SET_L4_DST_PORT:
6138 case OFPACT_REG_MOVE:
6139 case OFPACT_STACK_PUSH:
6140 case OFPACT_STACK_POP:
6141 case OFPACT_DEC_TTL:
6142 case OFPACT_SET_MPLS_LABEL:
6143 case OFPACT_SET_MPLS_TC:
6144 case OFPACT_SET_MPLS_TTL:
6145 case OFPACT_DEC_MPLS_TTL:
6146 case OFPACT_PUSH_MPLS:
6147 case OFPACT_POP_MPLS:
6148 case OFPACT_POP_QUEUE:
6149 case OFPACT_FIN_TIMEOUT:
6150 case OFPACT_RESUBMIT:
6151 case OFPACT_LEARN:
6152 case OFPACT_CONJUNCTION:
6153 case OFPACT_MULTIPATH:
6154 case OFPACT_NOTE:
6155 case OFPACT_EXIT:
6156 case OFPACT_SAMPLE:
7ae62a67 6157 case OFPACT_CLONE:
f839892a
JS
6158 case OFPACT_ENCAP:
6159 case OFPACT_DECAP:
e12ec36b
SH
6160 case OFPACT_UNROLL_XLATE:
6161 case OFPACT_CT:
72fe7578 6162 case OFPACT_CT_CLEAR:
e12ec36b
SH
6163 case OFPACT_NAT:
6164 case OFPACT_DEBUG_RECIRC:
6165 case OFPACT_METER:
6166 case OFPACT_CLEAR_ACTIONS:
6167 case OFPACT_WRITE_ACTIONS:
6168 case OFPACT_WRITE_METADATA:
6169 case OFPACT_GOTO_TABLE:
6170 default:
6171 break;
6172 }
6173
6174 /* Recirculate */
6175 ctx_trigger_freeze(ctx);
6176}
6177
2d9b49dd
BP
6178static void
6179xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6180{
6181 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6182 xlate_report_subfield(ctx, &a->dst);
6183}
6184
6185static void
6186xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6187{
6188 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6189 xlate_report_subfield(ctx, &a->subfield);
6190 } else {
6191 xlate_report_error(ctx, "stack underflow");
6192 }
6193}
6194
6195/* Restore translation context data that was stored earlier. */
6196static void
6197xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6198 const struct ofpact_unroll_xlate *a)
6199{
6200 ctx->table_id = a->rule_table_id;
6201 ctx->rule_cookie = a->rule_cookie;
6202 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6203 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6204}
6205
9583bc14
EJ
6206static void
6207do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
feee58b9 6208 struct xlate_ctx *ctx, bool is_last_action)
9583bc14 6209{
49a73e0c 6210 struct flow_wildcards *wc = ctx->wc;
33bf9176 6211 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
6212 const struct ofpact *a;
6213
a36de779 6214 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
53902038 6215 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
a36de779 6216 }
f47ea021
JR
6217 /* dl_type already in the mask, not set below. */
6218
2d9b49dd
BP
6219 if (!ofpacts_len) {
6220 xlate_report(ctx, OFT_ACTION, "drop");
6221 return;
6222 }
6223
9583bc14
EJ
6224 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6225 struct ofpact_controller *controller;
6226 const struct ofpact_metadata *metadata;
b2dd70be
JR
6227 const struct ofpact_set_field *set_field;
6228 const struct mf_field *mf;
feee58b9
AZ
6229 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6230 && ctx->action_set.size;
9583bc14 6231
fff1b9c0
JR
6232 if (ctx->error) {
6233 break;
6234 }
6235
e12ec36b
SH
6236 recirc_for_mpls(a, ctx);
6237
e672ff9b
JR
6238 if (ctx->exit) {
6239 /* Check if need to store the remaining actions for later
6240 * execution. */
1d361a81
BP
6241 if (ctx->freezing) {
6242 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
e672ff9b
JR
6243 ctx);
6244 }
6245 break;
7bbdd84f
SH
6246 }
6247
2d9b49dd
BP
6248 if (OVS_UNLIKELY(ctx->xin->trace)) {
6249 struct ds s = DS_EMPTY_INITIALIZER;
50f96b10 6250 ofpacts_format(a, OFPACT_ALIGN(a->len), NULL, &s);
2d9b49dd
BP
6251 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6252 ds_destroy(&s);
6253 }
6254
9583bc14
EJ
6255 switch (a->type) {
6256 case OFPACT_OUTPUT:
6257 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
feee58b9 6258 ofpact_get_OUTPUT(a)->max_len, true, last);
9583bc14
EJ
6259 break;
6260
7395c052 6261 case OFPACT_GROUP:
feee58b9 6262 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
1d741d6d 6263 /* Group could not be found. */
db88b35c
JR
6264
6265 /* XXX: Terminates action list translation, but does not
6266 * terminate the pipeline. */
f4fb341b
SH
6267 return;
6268 }
7395c052
NZ
6269 break;
6270
9583bc14
EJ
6271 case OFPACT_CONTROLLER:
6272 controller = ofpact_get_CONTROLLER(a);
77ab5fd2
BP
6273 if (controller->pause) {
6274 ctx->pause = controller;
6275 ctx->xout->slow |= SLOW_CONTROLLER;
0d3239e8 6276 *ctx->paused_flow = ctx->xin->flow;
77ab5fd2
BP
6277 ctx_trigger_freeze(ctx);
6278 a = ofpact_next(a);
6279 } else {
6280 execute_controller_action(ctx, controller->max_len,
6281 controller->reason,
6282 controller->controller_id,
6283 controller->userdata,
6284 controller->userdata_len);
6285 }
9583bc14
EJ
6286 break;
6287
6288 case OFPACT_ENQUEUE:
16194afd
DDP
6289 memset(&wc->masks.skb_priority, 0xff,
6290 sizeof wc->masks.skb_priority);
feee58b9 6291 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last);
9583bc14
EJ
6292 break;
6293
6294 case OFPACT_SET_VLAN_VID:
f0fb825a
EG
6295 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6296 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6297 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
f0fb825a
EG
6298 if (!flow->vlans[0].tpid) {
6299 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6300 }
6301 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6302 flow->vlans[0].tci |=
6303 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6304 htons(VLAN_CFI));
ca287d20 6305 }
9583bc14
EJ
6306 break;
6307
6308 case OFPACT_SET_VLAN_PCP:
f0fb825a
EG
6309 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6310 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6311 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
f0fb825a
EG
6312 if (!flow->vlans[0].tpid) {
6313 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6314 }
6315 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6316 flow->vlans[0].tci |=
6317 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6318 << VLAN_PCP_SHIFT) | VLAN_CFI);
ca287d20 6319 }
9583bc14
EJ
6320 break;
6321
6322 case OFPACT_STRIP_VLAN:
f0fb825a 6323 flow_pop_vlan(flow, wc);
9583bc14
EJ
6324 break;
6325
6326 case OFPACT_PUSH_VLAN:
f0fb825a
EG
6327 flow_push_vlan_uninit(flow, wc);
6328 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6329 flow->vlans[0].tci = htons(VLAN_CFI);
9583bc14
EJ
6330 break;
6331
6332 case OFPACT_SET_ETH_SRC:
74ff3298
JR
6333 WC_MASK_FIELD(wc, dl_src);
6334 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
9583bc14
EJ
6335 break;
6336
6337 case OFPACT_SET_ETH_DST:
74ff3298
JR
6338 WC_MASK_FIELD(wc, dl_dst);
6339 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
9583bc14
EJ
6340 break;
6341
6342 case OFPACT_SET_IPV4_SRC:
33bf9176 6343 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6344 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 6345 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
6346 }
6347 break;
6348
6349 case OFPACT_SET_IPV4_DST:
33bf9176 6350 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6351 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 6352 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
6353 }
6354 break;
6355
04f01c24
BP
6356 case OFPACT_SET_IP_DSCP:
6357 if (is_ip_any(flow)) {
f47ea021 6358 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 6359 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 6360 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
9583bc14
EJ
6361 }
6362 break;
6363
ff14eb7a
JR
6364 case OFPACT_SET_IP_ECN:
6365 if (is_ip_any(flow)) {
6366 wc->masks.nw_tos |= IP_ECN_MASK;
6367 flow->nw_tos &= ~IP_ECN_MASK;
6368 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6369 }
6370 break;
6371
0c20dbe4
JR
6372 case OFPACT_SET_IP_TTL:
6373 if (is_ip_any(flow)) {
6374 wc->masks.nw_ttl = 0xff;
6375 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6376 }
6377 break;
6378
9583bc14 6379 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 6380 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6381 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6382 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 6383 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
6384 }
6385 break;
6386
6387 case OFPACT_SET_L4_DST_PORT:
b8778a0d 6388 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
6389 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6390 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 6391 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
6392 }
6393 break;
6394
6395 case OFPACT_RESUBMIT:
8bf009bf
JR
6396 /* Freezing complicates resubmit. Some action in the flow
6397 * entry found by resubmit might trigger freezing. If that
6398 * happens, then we do not want to execute the resubmit again
6399 * during thawing, so we skip back to the head of the loop to
6400 * avoid that, adding only the actions that follow the resubmit
6401 * to the frozen actions.
6b1c5734 6402 */
feee58b9 6403 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6b1c5734 6404 continue;
9583bc14
EJ
6405
6406 case OFPACT_SET_TUNNEL:
33bf9176 6407 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
6408 break;
6409
6410 case OFPACT_SET_QUEUE:
16194afd
DDP
6411 memset(&wc->masks.skb_priority, 0xff,
6412 sizeof wc->masks.skb_priority);
9583bc14
EJ
6413 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6414 break;
6415
6416 case OFPACT_POP_QUEUE:
16194afd
DDP
6417 memset(&wc->masks.skb_priority, 0xff,
6418 sizeof wc->masks.skb_priority);
2d9b49dd
BP
6419 if (flow->skb_priority != ctx->orig_skb_priority) {
6420 flow->skb_priority = ctx->orig_skb_priority;
6421 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6422 flow->skb_priority);
6423 }
9583bc14
EJ
6424 break;
6425
6426 case OFPACT_REG_MOVE:
2d9b49dd 6427 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
9583bc14
EJ
6428 break;
6429
b2dd70be
JR
6430 case OFPACT_SET_FIELD:
6431 set_field = ofpact_get_SET_FIELD(a);
6432 mf = set_field->field;
b2dd70be 6433
aff49b8c
JR
6434 /* Set the field only if the packet actually has it. */
6435 if (mf_are_prereqs_ok(mf, flow, wc)) {
128684a6
JR
6436 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6437 mf_set_flow_value_masked(mf, set_field->value,
6438 ofpact_set_field_mask(set_field),
6439 flow);
2d9b49dd
BP
6440 } else {
6441 xlate_report(ctx, OFT_WARN,
6442 "unmet prerequisites for %s, set_field ignored",
6443 mf->name);
6444
b8778a0d 6445 }
b2dd70be
JR
6446 break;
6447
9583bc14 6448 case OFPACT_STACK_PUSH:
33bf9176
BP
6449 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6450 &ctx->stack);
9583bc14
EJ
6451 break;
6452
6453 case OFPACT_STACK_POP:
2d9b49dd 6454 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
9583bc14
EJ
6455 break;
6456
6457 case OFPACT_PUSH_MPLS:
8bfd0fda 6458 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
9583bc14
EJ
6459 break;
6460
6461 case OFPACT_POP_MPLS:
8bfd0fda 6462 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
9583bc14
EJ
6463 break;
6464
097d4939 6465 case OFPACT_SET_MPLS_LABEL:
8bfd0fda
BP
6466 compose_set_mpls_label_action(
6467 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
1d741d6d 6468 break;
097d4939
JR
6469
6470 case OFPACT_SET_MPLS_TC:
8bfd0fda 6471 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
097d4939
JR
6472 break;
6473
9583bc14 6474 case OFPACT_SET_MPLS_TTL:
8bfd0fda 6475 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
9583bc14
EJ
6476 break;
6477
6478 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 6479 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 6480 return;
9583bc14
EJ
6481 }
6482 break;
6483
6484 case OFPACT_DEC_TTL:
f74e7df7 6485 wc->masks.nw_ttl = 0xff;
9583bc14 6486 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 6487 return;
9583bc14
EJ
6488 }
6489 break;
6490
6491 case OFPACT_NOTE:
6492 /* Nothing to do. */
6493 break;
6494
6495 case OFPACT_MULTIPATH:
33bf9176 6496 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
2d9b49dd 6497 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
9583bc14
EJ
6498 break;
6499
6500 case OFPACT_BUNDLE:
feee58b9 6501 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last);
9583bc14
EJ
6502 break;
6503
6504 case OFPACT_OUTPUT_REG:
feee58b9 6505 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last);
9583bc14
EJ
6506 break;
6507
aaca4fe0
WT
6508 case OFPACT_OUTPUT_TRUNC:
6509 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
feee58b9 6510 ofpact_get_OUTPUT_TRUNC(a)->max_len, last);
aaca4fe0
WT
6511 break;
6512
9583bc14
EJ
6513 case OFPACT_LEARN:
6514 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6515 break;
6516
2d9b49dd 6517 case OFPACT_CONJUNCTION:
afc3987b
BP
6518 /* A flow with a "conjunction" action represents part of a special
6519 * kind of "set membership match". Such a flow should not actually
6520 * get executed, but it could via, say, a "packet-out", even though
6521 * that wouldn't be useful. Log it to help debugging. */
2d9b49dd 6522 xlate_report_error(ctx, "executing no-op conjunction action");
18080541
BP
6523 break;
6524
9583bc14
EJ
6525 case OFPACT_EXIT:
6526 ctx->exit = true;
6527 break;
6528
2d9b49dd
BP
6529 case OFPACT_UNROLL_XLATE:
6530 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
e672ff9b 6531 break;
2d9b49dd 6532
9583bc14 6533 case OFPACT_FIN_TIMEOUT:
33bf9176 6534 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
9583bc14
EJ
6535 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6536 break;
6537
6538 case OFPACT_CLEAR_ACTIONS:
2d9b49dd 6539 xlate_report_action_set(ctx, "was");
7fdb60a7 6540 ofpbuf_clear(&ctx->action_set);
c61f3870
BP
6541 ctx->xin->flow.actset_output = OFPP_UNSET;
6542 ctx->action_set_has_group = false;
7fdb60a7
SH
6543 break;
6544
6545 case OFPACT_WRITE_ACTIONS:
7e7e8dbb 6546 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
2d9b49dd 6547 xlate_report_action_set(ctx, "is");
9583bc14
EJ
6548 break;
6549
6550 case OFPACT_WRITE_METADATA:
6551 metadata = ofpact_get_WRITE_METADATA(a);
33bf9176
BP
6552 flow->metadata &= ~metadata->mask;
6553 flow->metadata |= metadata->metadata & metadata->mask;
9583bc14
EJ
6554 break;
6555
638a19b0 6556 case OFPACT_METER:
076caa2f 6557 xlate_meter_action(ctx, ofpact_get_METER(a));
638a19b0
JR
6558 break;
6559
9583bc14 6560 case OFPACT_GOTO_TABLE: {
9583bc14 6561 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14 6562
9167fc1a
JR
6563 ovs_assert(ctx->table_id < ogt->table_id);
6564
4468099e 6565 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
96c3a6e5
AZ
6566 ogt->table_id, true, true, false, last,
6567 do_xlate_actions);
9583bc14
EJ
6568 break;
6569 }
6570
6571 case OFPACT_SAMPLE:
6572 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6573 break;
d4abaff5 6574
7ae62a67 6575 case OFPACT_CLONE:
feee58b9 6576 compose_clone(ctx, ofpact_get_CLONE(a), last);
7ae62a67
WT
6577 break;
6578
f839892a
JS
6579 case OFPACT_ENCAP:
6580 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6581 break;
6582
6583 case OFPACT_DECAP: {
6584 bool recirc_needed =
6585 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6586 if (!ctx->error && recirc_needed) {
6587 /* Recirculate for parsing of inner packet. */
6588 ctx_trigger_freeze(ctx);
6589 /* Then continue with next action. */
6590 a = ofpact_next(a);
6591 }
6592 break;
6593 }
6594
07659514 6595 case OFPACT_CT:
feee58b9 6596 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
07659514
JS
6597 break;
6598
72fe7578
BP
6599 case OFPACT_CT_CLEAR:
6600 clear_conntrack(ctx);
6601 break;
6602
9ac0aada
JR
6603 case OFPACT_NAT:
6604 /* This will be processed by compose_conntrack_action(). */
6605 ctx->ct_nat_action = ofpact_get_NAT(a);
6606 break;
6607
d4abaff5 6608 case OFPACT_DEBUG_RECIRC:
1d361a81 6609 ctx_trigger_freeze(ctx);
d4abaff5
BP
6610 a = ofpact_next(a);
6611 break;
9583bc14 6612 }
1d741d6d
JR
6613
6614 /* Check if we need to store this and the remaining actions for later
6615 * execution. */
1d361a81
BP
6616 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
6617 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
1d741d6d
JR
6618 break;
6619 }
9583bc14 6620 }
9583bc14
EJ
6621}
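
/* A simplified sketch of the bookkeeping behind freeze_unroll_actions()
 * as used twice above; the real function also records the extra state
 * needed to unroll the translation.  When translation must stop mid-list,
 * the untranslated tail [a, end) is saved so that the thawed translation
 * resumes exactly where this one left off. */
static void
freeze_tail_sketch(struct xlate_ctx *ctx, const struct ofpact *a,
                   const struct ofpact *end)
{
    /* Copy the remaining, not-yet-executed actions verbatim. */
    ofpbuf_put(&ctx->frozen_actions, a, (char *) end - (char *) a);
}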
6622
6623void
6624xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
1f4a8933
JR
6625 ovs_version_t version, const struct flow *flow,
6626 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
1520ef4f
BP
6627 const struct dp_packet *packet, struct flow_wildcards *wc,
6628 struct ofpbuf *odp_actions)
9583bc14
EJ
6629{
6630 xin->ofproto = ofproto;
1f4a8933 6631 xin->tables_version = version;
9583bc14 6632 xin->flow = *flow;
8d8ab6c2 6633 xin->upcall_flow = flow;
cc377352 6634 xin->flow.in_port.ofp_port = in_port;
c61f3870 6635 xin->flow.actset_output = OFPP_UNSET;
9583bc14 6636 xin->packet = packet;
df70a773 6637 xin->allow_side_effects = packet != NULL;
9583bc14 6638 xin->rule = rule;
b256dc52 6639 xin->xcache = NULL;
9583bc14
EJ
6640 xin->ofpacts = NULL;
6641 xin->ofpacts_len = 0;
6642 xin->tcp_flags = tcp_flags;
2d9b49dd 6643 xin->trace = NULL;
9583bc14 6644 xin->resubmit_stats = NULL;
790c5d26 6645 xin->depth = 0;
cdd42eda 6646 xin->resubmits = 0;
49a73e0c 6647 xin->wc = wc;
1520ef4f 6648 xin->odp_actions = odp_actions;
331c07ac 6649 xin->in_packet_out = false;
e6bc8e74 6650 xin->recirc_queue = NULL;
e672ff9b
JR
6651
6652 /* Do recirc lookup. */
1d361a81 6653 xin->frozen_state = NULL;
29b1ea3f
BP
6654 if (flow->recirc_id) {
6655 const struct recirc_id_node *node
6656 = recirc_id_node_find(flow->recirc_id);
6657 if (node) {
1d361a81 6658 xin->frozen_state = &node->state;
29b1ea3f
BP
6659 }
6660 }
9583bc14
EJ
6661}
6662
6663void
6664xlate_out_uninit(struct xlate_out *xout)
6665{
e672ff9b 6666 if (xout) {
fbf5d6ec 6667 recirc_refs_unref(&xout->recircs);
9583bc14
EJ
6668 }
6669}
9583bc14 6670\f
55954f6e
EJ
6671static struct skb_priority_to_dscp *
6672get_skb_priority(const struct xport *xport, uint32_t skb_priority)
6673{
6674 struct skb_priority_to_dscp *pdscp;
6675 uint32_t hash;
6676
6677 hash = hash_int(skb_priority, 0);
6678 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
6679 if (pdscp->skb_priority == skb_priority) {
6680 return pdscp;
6681 }
6682 }
6683 return NULL;
6684}
6685
6686static bool
6687dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
6688 uint8_t *dscp)
6689{
6690 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
6691 *dscp = pdscp ? pdscp->dscp : 0;
6692 return pdscp != NULL;
6693}
6694
16194afd
DDP
6695static size_t
6696count_skb_priorities(const struct xport *xport)
6697{
6698 return hmap_count(&xport->skb_priorities);
6699}
6700
55954f6e
EJ
6701static void
6702clear_skb_priorities(struct xport *xport)
6703{
4ec3d7c7 6704 struct skb_priority_to_dscp *pdscp;
55954f6e 6705
4ec3d7c7 6706 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
55954f6e
EJ
6707 free(pdscp);
6708 }
6709}
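
/* A sketch of the insertion counterpart (hypothetical here; the real
 * table is populated from QoS configuration elsewhere): each entry maps a
 * kernel skb_priority to the DSCP value to rewrite into outgoing IP
 * headers, keyed exactly as get_skb_priority() expects. */
static void
add_skb_priority_sketch(struct xport *xport, uint32_t skb_priority,
                        uint8_t dscp)
{
    struct skb_priority_to_dscp *pdscp = xmalloc(sizeof *pdscp);

    pdscp->skb_priority = skb_priority;
    pdscp->dscp = dscp;
    hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                hash_int(skb_priority, 0));
}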
6710
ce4a6b76
BP
6711static bool
6712actions_output_to_local_port(const struct xlate_ctx *ctx)
6713{
46c88433 6714 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
ce4a6b76
BP
6715 const struct nlattr *a;
6716 unsigned int left;
6717
1520ef4f
BP
6718 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
6719 ctx->odp_actions->size) {
ce4a6b76
BP
6720 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
6721 && nl_attr_get_odp_port(a) == local_odp_port) {
6722 return true;
6723 }
6724 }
6725 return false;
6726}
9583bc14 6727
5e2a6702 6728#if defined(__linux__)
7d031d7e
BP
6729/* Returns the maximum number of packets that the Linux kernel is willing to
6730 * queue up internally to certain kinds of software-implemented ports, or the
6731 * default (and rarely modified) value if it cannot be determined. */
6732static int
6733netdev_max_backlog(void)
6734{
6735 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6736 static int max_backlog = 1000; /* The normal default value. */
6737
6738 if (ovsthread_once_start(&once)) {
6739 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
6740 FILE *stream;
6741 int n;
6742
6743 stream = fopen(filename, "r");
6744 if (!stream) {
120c348f 6745 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7d031d7e
BP
6746 } else {
6747 if (fscanf(stream, "%d", &n) != 1) {
6748 VLOG_WARN("%s: read error", filename);
6749 } else if (n <= 100) {
6750 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
6751 } else {
6752 max_backlog = n;
6753 }
6754 fclose(stream);
6755 }
6756 ovsthread_once_done(&once);
6757
6758 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
6759 }
6760
6761 return max_backlog;
6762}
6763
6764/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
6765 * 'odp_actions'. */
6766static int
6767count_output_actions(const struct ofpbuf *odp_actions)
6768{
6769 const struct nlattr *a;
6770 size_t left;
6771 int n = 0;
6772
6fd6ed71 6773 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7d031d7e
BP
6774 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
6775 n++;
6776 }
6777 }
6778 return n;
6779}
5e2a6702 6780#endif /* defined(__linux__) */
7d031d7e
BP
6781
6782/* Returns true if 'odp_actions' contains more output actions than the datapath
6783 * can reliably handle in one go. On Linux, this is the value of the
6784 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
6785 * packets that the kernel is willing to queue up for processing while the
6786 * datapath is processing a set of actions. */
6787static bool
5e2a6702 6788too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7d031d7e
BP
6789{
6790#ifdef __linux__
6fd6ed71 6791 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7d031d7e
BP
6792 && count_output_actions(odp_actions) > netdev_max_backlog());
6793#else
6794 /* OSes other than Linux might have similar limits, but we don't know how
6795 * to determine them. */
6796 return false;
6797#endif
6798}
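
/* Worked example of the test above, assuming the Linux default backlog of
 * 1000: an OVS_ACTION_ATTR_OUTPUT occupies at least NL_A_U32_SIZE bytes,
 * so a buffer of at most 1000 * NL_A_U32_SIZE bytes cannot hold more than
 * 1000 outputs.  The cheap size check is thus a necessary condition that
 * short-circuits the O(n) count_output_actions() scan in the common
 * case. */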
6799
234c3da9
BP
6800static void
6801xlate_wc_init(struct xlate_ctx *ctx)
6802{
6803 flow_wildcards_init_catchall(ctx->wc);
6804
6805 /* Some fields we consider to always be examined. */
3d4b2e6e 6806 WC_MASK_FIELD(ctx->wc, packet_type);
5e2e998a 6807 WC_MASK_FIELD(ctx->wc, in_port);
3d4b2e6e
JS
6808 if (is_ethernet(&ctx->xin->flow, NULL)) {
6809 WC_MASK_FIELD(ctx->wc, dl_type);
6810 }
234c3da9 6811 if (is_ip_any(&ctx->xin->flow)) {
5e2e998a 6812 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
234c3da9
BP
6813 }
6814
6815 if (ctx->xbridge->support.odp.recirc) {
6816 /* Always exactly match recirc_id when datapath supports
6817 * recirculation. */
5e2e998a 6818 WC_MASK_FIELD(ctx->wc, recirc_id);
234c3da9
BP
6819 }
6820
6821 if (ctx->xbridge->netflow) {
6822 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
6823 }
6824
6825 tnl_wc_init(&ctx->xin->flow, ctx->wc);
6826}
6827
6828static void
6829xlate_wc_finish(struct xlate_ctx *ctx)
6830{
f0fb825a
EG
6831 int i;
6832
234c3da9
BP
6833 /* Clear the metadata and register wildcard masks, because we won't
6834 * use non-header fields as part of the cache. */
6835 flow_wildcards_clear_non_packet_fields(ctx->wc);
6836
f839892a 6837 /* Wildcard ethernet fields if the original packet type was not
beb75a40
JS
6838 * Ethernet. */
6839 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
6840 ctx->wc->masks.dl_dst = eth_addr_zero;
6841 ctx->wc->masks.dl_src = eth_addr_zero;
3d4b2e6e 6842 ctx->wc->masks.dl_type = 0;
beb75a40
JS
6843 }
6844
234c3da9
BP
6845 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
6846 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
6847 * represent these fields. The datapath interface, on the other hand,
6848 * represents them with just 8 bits each. This means that if the high
6849 * 8 bits of the masks for these fields somehow become set, then they
6850 * will get chopped off by a round trip through the datapath, and
6851 * revalidation will spot that as an inconsistency and delete the flow.
6852 * Avoid the problem here by making sure that only the low 8 bits of
6853 * either field can be unwildcarded for ICMP.
6854 */
a75636c8 6855 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
234c3da9
BP
6856 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
6857 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
6858 }
6859 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
f0fb825a
EG
6860 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
6861 if (ctx->wc->masks.vlans[i].tci) {
6862 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
6863 }
234c3da9 6864 }
4a7ab326
DDP
6865
6866 /* The classifier might return masks that match on tp_src and tp_dst even
6867 * for later fragments. This happens because there might be flows that
6868 * match on tp_src or tp_dst without matching on the frag bits, because
6869 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
6870 * datapath flows and since tp_src and tp_dst are always going to be 0,
6871 * wildcard the fields here. */
6872 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
6873 ctx->wc->masks.tp_src = 0;
6874 ctx->wc->masks.tp_dst = 0;
6875 }
234c3da9
BP
6876}
6877
e672ff9b
JR
6878/* Translates the flow, actions, or rule in 'xin' into datapath actions in
6879 * 'xout'.
56450a41 6880 * The caller must take responsibility for eventually freeing 'xout', with
fff1b9c0
JR
6881 * xlate_out_uninit().
6882 * Returns 'XLATE_OK' if translation was successful. In case of an error an
6883 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
6884 * so that most callers may ignore the return value and transparently install a
6885 * drop flow when the translation fails. */
6886enum xlate_error
84f0f298 6887xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
9583bc14 6888{
e467ea42
BP
6889 *xout = (struct xlate_out) {
6890 .slow = 0,
fbf5d6ec 6891 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
e467ea42
BP
6892 };
6893
84f0f298 6894 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
bb00fdef
BP
6895 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
6896 if (!xbridge) {
fff1b9c0 6897 return XLATE_BRIDGE_NOT_FOUND;
bb00fdef
BP
6898 }
6899
33bf9176
BP
6900 struct flow *flow = &xin->flow;
6901
84cf3c1f 6902 uint8_t stack_stub[1024];
bb00fdef 6903 uint64_t action_set_stub[1024 / 8];
1d361a81 6904 uint64_t frozen_actions_stub[1024 / 8];
1520ef4f
BP
6905 uint64_t actions_stub[256 / 8];
6906 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
0d3239e8 6907 struct flow paused_flow;
bb00fdef
BP
6908 struct xlate_ctx ctx = {
6909 .xin = xin,
6910 .xout = xout,
6911 .base_flow = *flow,
c2b878e0 6912 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
bb00fdef
BP
6913 .xbridge = xbridge,
6914 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
6915 .rule = xin->rule,
c0e638aa
BP
6916 .wc = (xin->wc
6917 ? xin->wc
f36efd90 6918 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
1520ef4f 6919 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
bb00fdef 6920
790c5d26 6921 .depth = xin->depth,
cdd42eda 6922 .resubmits = xin->resubmits,
bb00fdef
BP
6923 .in_group = false,
6924 .in_action_set = false,
331c07ac 6925 .in_packet_out = xin->in_packet_out,
f839892a 6926 .pending_encap = false,
1fc11c59 6927 .encap_data = NULL,
bb00fdef
BP
6928
6929 .table_id = 0,
6930 .rule_cookie = OVS_BE64_MAX,
6931 .orig_skb_priority = flow->skb_priority,
6932 .sflow_n_outputs = 0,
6933 .sflow_odp_port = 0,
2031ef97 6934 .nf_output_iface = NF_OUT_DROP,
bb00fdef 6935 .exit = false,
fff1b9c0 6936 .error = XLATE_OK,
3d6151f3 6937 .mirrors = 0,
bb00fdef 6938
1d361a81 6939 .freezing = false,
53cc166a 6940 .recirc_update_dp_hash = false,
1d361a81 6941 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
77ab5fd2 6942 .pause = NULL,
0d3239e8 6943 .paused_flow = &paused_flow,
bb00fdef 6944
e12ec36b 6945 .was_mpls = false,
07659514 6946 .conntracked = false,
bb00fdef 6947
9ac0aada
JR
6948 .ct_nat_action = NULL,
6949
bb00fdef
BP
6950 .action_set_has_group = false,
6951 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
6952 };
865ca6cf
BP
6953
6954 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
42deb67d
PS
6955 * the packet as the datapath will treat it for output actions. Our
6956 * datapath doesn't retain tunneling information without us re-setting
6957 * it, so clear the tunnel data.
865ca6cf 6958 */
42deb67d 6959
bb00fdef 6960 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
865ca6cf 6961
1520ef4f 6962 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
c0e638aa 6963 xlate_wc_init(&ctx);
bb00fdef 6964
46c88433 6965 COVERAGE_INC(xlate_actions);
9583bc14 6966
2d9b49dd
BP
6967 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
6968 xbridge->name);
1d361a81
BP
6969 if (xin->frozen_state) {
6970 const struct frozen_state *state = xin->frozen_state;
e672ff9b 6971
2d9b49dd
BP
6972 struct ovs_list *old_trace = xin->trace;
6973 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
d6bef3cc 6974
e672ff9b 6975 if (xin->ofpacts_len > 0 || ctx.rule) {
2d9b49dd
BP
6976 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
6977 xin->ofpacts_len ? "actions" : "rule");
fff1b9c0 6978 ctx.error = XLATE_RECIRCULATION_CONFLICT;
1520ef4f 6979 goto exit;
e672ff9b
JR
6980 }
6981
6982 /* Set the bridge for post-recirculation processing if needed. */
07a3cd5c 6983 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
2082425c 6984 const struct xbridge *new_bridge
290835f9 6985 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
e672ff9b
JR
6986
6987 if (OVS_UNLIKELY(!new_bridge)) {
6988 /* Drop the packet if the bridge cannot be found. */
2d9b49dd 6989 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
fff1b9c0 6990 ctx.error = XLATE_BRIDGE_NOT_FOUND;
2d9b49dd 6991 xin->trace = old_trace;
1520ef4f 6992 goto exit;
e672ff9b
JR
6993 }
6994 ctx.xbridge = new_bridge;
1f4a8933
JR
6995 /* The bridge is now known so obtain its table version. */
6996 ctx.xin->tables_version
6997 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
e672ff9b
JR
6998 }
6999
1d361a81
BP
7000 /* Set the thawed table id. Note: A table lookup is done only if there
7001 * are no frozen actions. */
2082425c 7002 ctx.table_id = state->table_id;
2d9b49dd
BP
7003 xlate_report(&ctx, OFT_THAW,
7004 "Resuming from table %"PRIu8, ctx.table_id);
e672ff9b 7005
40b0fbd3 7006 ctx.conntracked = state->conntracked;
07659514 7007 if (!state->conntracked) {
72fe7578 7008 clear_conntrack(&ctx);
07659514
JS
7009 }
7010
e672ff9b 7011 /* Restore pipeline metadata. May change flow's in_port and other
1d361a81
BP
7012 * metadata to the values that existed when freezing was triggered. */
7013 frozen_metadata_to_flow(&state->metadata, flow);
e672ff9b
JR
7014
7015 /* Restore stack, if any. */
2082425c 7016 if (state->stack) {
84cf3c1f 7017 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
e672ff9b
JR
7018 }
7019
29bae541
BP
7020 /* Restore mirror state. */
7021 ctx.mirrors = state->mirrors;
7022
e672ff9b 7023 /* Restore action set, if any. */
2082425c 7024 if (state->action_set_len) {
2d9b49dd 7025 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
417509fa 7026 state->action_set, state->action_set_len);
d6bef3cc 7027
7e7e8dbb
BP
7028 flow->actset_output = OFPP_UNSET;
7029 xlate_write_actions__(&ctx, state->action_set,
7030 state->action_set_len);
e672ff9b
JR
7031 }
7032
1d361a81
BP
7033 /* Restore frozen actions. If there are no actions, processing will
7034 * start with a lookup in the table set above. */
417509fa
BP
7035 xin->ofpacts = state->ofpacts;
7036 xin->ofpacts_len = state->ofpacts_len;
7037 if (state->ofpacts_len) {
2d9b49dd 7038 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
d6bef3cc 7039 xin->ofpacts, xin->ofpacts_len);
e672ff9b 7040 }
e672ff9b 7041
2d9b49dd
BP
7042 xin->trace = old_trace;
7043 } else if (OVS_UNLIKELY(flow->recirc_id)) {
7044 xlate_report_error(&ctx,
7045 "Recirculation context not found for ID %"PRIx32,
7046 flow->recirc_id);
fff1b9c0 7047 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
1520ef4f 7048 goto exit;
e672ff9b 7049 }
9583bc14 7050
8d8ab6c2
JG
7051 /* Tunnel metadata in udpif format must be normalized before translation. */
7052 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
5b09d9f7
MS
7053 const struct tun_table *tun_tab = ofproto_get_tun_tab(
7054 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
7055 int err;
7056
7057 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
7058 &xin->upcall_flow->tunnel,
7059 &flow->tunnel);
7060 if (err) {
2d9b49dd 7061 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
8d8ab6c2
JG
7062 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7063 goto exit;
7064 }
7065 } else if (!flow->tunnel.metadata.tab) {
7066 /* If the original flow did not come in on a tunnel, then it won't have
7067 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7068 * table in case we generate tunnel actions. */
5b09d9f7
MS
7069 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7070 &ctx.xbridge->ofproto->up);
8d8ab6c2
JG
7071 }
7072 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7073
beb75a40
JS
7074 /* Get the proximate input port of the packet. (If xin->frozen_state,
7075 * flow->in_port is the ultimate input port of the packet.) */
7076 struct xport *in_port = get_ofp_port(xbridge,
7077 ctx.base_flow.in_port.ofp_port);
7078
875ab130
BP
7079 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7080 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
beb75a40
JS
7081 /* Add a dummy Ethernet header to a non-L2 packet if it's coming from an
7082 * L3 port, so that all packets are L2 packets for lookup.
7083 * The dl_type has already been set from the packet_type. */
7084 flow->packet_type = htonl(PT_ETH);
7085 flow->dl_src = eth_addr_zero;
7086 flow->dl_dst = eth_addr_zero;
f839892a 7087 ctx.pending_encap = true;
beb75a40
JS
7088 }
7089
10c44245 7090 if (!xin->ofpacts && !ctx.rule) {
b2e89cc9 7091 ctx.rule = rule_dpif_lookup_from_table(
1f4a8933 7092 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
1e1e1d19 7093 ctx.xin->resubmit_stats, &ctx.table_id,
a027899e 7094 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
10c44245 7095 if (ctx.xin->resubmit_stats) {
b2e89cc9 7096 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
10c44245 7097 }
b256dc52
JS
7098 if (ctx.xin->xcache) {
7099 struct xc_entry *entry;
7100
7101 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
901a517e 7102 entry->rule = ctx.rule;
07a3cd5c 7103 ofproto_rule_ref(&ctx.rule->up);
b256dc52 7104 }
a8c31348 7105
2d9b49dd 7106 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
10c44245 7107 }
10c44245 7108
1d361a81
BP
7109 /* Tunnel stats only for not-thawed packets. */
7110 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
b256dc52
JS
7111 if (ctx.xin->resubmit_stats) {
7112 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7113 if (in_port->bfd) {
7114 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7115 }
7116 }
7117 if (ctx.xin->xcache) {
7118 struct xc_entry *entry;
7119
7120 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
901a517e
JR
7121 entry->dev.rx = netdev_ref(in_port->netdev);
7122 entry->dev.bfd = bfd_ref(in_port->bfd);
d6fc5f57
EJ
7123 }
7124 }
7125
1d361a81 7126 if (!xin->frozen_state && process_special(&ctx, in_port)) {
bef1403e
BP
7127 /* process_special() did all the processing for this packet.
7128 *
1d361a81
BP
7129 * We do not perform special processing on thawed packets, since that
7130 * was done before they were frozen and should not be redone. */
bef1403e
BP
7131 } else if (in_port && in_port->xbundle
7132 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
2d9b49dd
BP
7133 xlate_report_error(&ctx, "dropping packet received on port "
7134 "%s, which is reserved exclusively for mirroring",
7135 in_port->xbundle->name);
bef1403e 7136 } else {
1d361a81 7137 /* Sampling is done on initial reception; don't redo after thawing. */
a6092018 7138 unsigned int user_cookie_offset = 0;
1d361a81 7139 if (!xin->frozen_state) {
a6092018
BP
7140 user_cookie_offset = compose_sflow_action(&ctx);
7141 compose_ipfix_action(&ctx, ODPP_NONE);
e672ff9b 7142 }
0731abc5 7143 size_t sample_actions_len = ctx.odp_actions->size;
9583bc14 7144
234c3da9
BP
7145 if (tnl_process_ecn(flow)
7146 && (!in_port || may_receive(in_port, &ctx))) {
1806291d
BP
7147 const struct ofpact *ofpacts;
7148 size_t ofpacts_len;
7149
7150 if (xin->ofpacts) {
7151 ofpacts = xin->ofpacts;
7152 ofpacts_len = xin->ofpacts_len;
7153 } else if (ctx.rule) {
7154 const struct rule_actions *actions
07a3cd5c 7155 = rule_get_actions(&ctx.rule->up);
1806291d
BP
7156 ofpacts = actions->ofpacts;
7157 ofpacts_len = actions->ofpacts_len;
07a3cd5c 7158 ctx.rule_cookie = ctx.rule->up.flow_cookie;
1806291d
BP
7159 } else {
7160 OVS_NOT_REACHED();
7161 }
7162
7efbc3b7 7163 mirror_ingress_packet(&ctx);
feee58b9 7164 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true);
fff1b9c0
JR
7165 if (ctx.error) {
7166 goto exit;
7167 }
9583bc14
EJ
7168
7169 /* We've let OFPP_NORMAL and the learning action look at the
1d361a81 7170 * packet, so cancel all actions and freezing if forwarding is
8a5fb3b4 7171 * disabled. */
9efd308e
DV
7172 if (in_port && (!xport_stp_forward_state(in_port) ||
7173 !xport_rstp_forward_state(in_port))) {
1520ef4f 7174 ctx.odp_actions->size = sample_actions_len;
1d361a81 7175 ctx_cancel_freeze(&ctx);
8a5fb3b4
BP
7176 ofpbuf_clear(&ctx.action_set);
7177 }
7178
1d361a81 7179 if (!ctx.freezing) {
8a5fb3b4 7180 xlate_action_set(&ctx);
e672ff9b 7181 }
1d361a81 7182 if (ctx.freezing) {
77ab5fd2 7183 finish_freezing(&ctx);
9583bc14
EJ
7184 }
7185 }
7186
e672ff9b 7187 /* Output only fully processed packets. */
1d361a81 7188 if (!ctx.freezing
e672ff9b 7189 && xbridge->has_in_band
ce4a6b76
BP
7190 && in_band_must_output_to_local_port(flow)
7191 && !actions_output_to_local_port(&ctx)) {
feee58b9 7192 compose_output_action(&ctx, OFPP_LOCAL, NULL, false);
9583bc14 7193 }
aaa0fbae 7194
a6092018
BP
7195 if (user_cookie_offset) {
7196 fix_sflow_action(&ctx, user_cookie_offset);
e672ff9b 7197 }
9583bc14
EJ
7198 }
7199
1520ef4f 7200 if (nl_attr_oversized(ctx.odp_actions->size)) {
542024c4 7201 /* These datapath actions are too big for a Netlink attribute, so we
0f032e95
BP
7202 * can't hand them to the kernel directly. dpif_execute() can execute
7203 * them one by one with help, so just mark the result as SLOW_ACTION to
7204 * prevent the flow from being installed. */
7205 COVERAGE_INC(xlate_actions_oversize);
7206 ctx.xout->slow |= SLOW_ACTION;
1520ef4f 7207 } else if (too_many_output_actions(ctx.odp_actions)) {
7d031d7e
BP
7208 COVERAGE_INC(xlate_actions_too_many_output);
7209 ctx.xout->slow |= SLOW_ACTION;
542024c4
BP
7210 }
7211
64fb5f82
JP
7212 /* Update NetFlow for non-frozen traffic. */
7213 if (xbridge->netflow && !xin->frozen_state) {
1806291d
BP
7214 if (ctx.xin->resubmit_stats) {
7215 netflow_flow_update(xbridge->netflow, flow,
2031ef97 7216 ctx.nf_output_iface,
1806291d
BP
7217 ctx.xin->resubmit_stats);
7218 }
7219 if (ctx.xin->xcache) {
7220 struct xc_entry *entry;
b256dc52 7221
1806291d 7222 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
901a517e
JR
7223 entry->nf.netflow = netflow_ref(xbridge->netflow);
7224 entry->nf.flow = xmemdup(flow, sizeof *flow);
7225 entry->nf.iface = ctx.nf_output_iface;
d6fc5f57
EJ
7226 }
7227 }
7228
8d8ab6c2
JG
7229 /* Translate tunnel metadata masks to udpif format if necessary. */
7230 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7231 if (ctx.wc->masks.tunnel.metadata.present.map) {
7232 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7233 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7234 sizeof(struct geneve_opt)];
7235
7236 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7237 &ctx.wc->masks.tunnel,
7238 upcall_tnl->metadata.opts.gnv,
7239 upcall_tnl->metadata.present.len,
7240 opts);
7241 memset(&ctx.wc->masks.tunnel.metadata, 0,
7242 sizeof ctx.wc->masks.tunnel.metadata);
7243 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7244 upcall_tnl->metadata.present.len);
7245 }
7246 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7247 ctx.wc->masks.tunnel.metadata.tab = NULL;
7248 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7249 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7250 /* If we didn't have options in UDPIF format and didn't have an existing
7251 * metadata table, then it means that there were no options at all when
7252 * we started processing and any wildcards we picked up were from
7253 * action generation. Without options on the incoming packet, wildcards
7254 * aren't meaningful. To avoid them possibly getting misinterpreted,
7255 * just clear everything. */
7256 if (ctx.wc->masks.tunnel.metadata.present.map) {
7257 memset(&ctx.wc->masks.tunnel.metadata, 0,
7258 sizeof ctx.wc->masks.tunnel.metadata);
7259 } else {
7260 ctx.wc->masks.tunnel.metadata.tab = NULL;
7261 }
7262 }
7263
c0e638aa 7264 xlate_wc_finish(&ctx);
1520ef4f
BP
7265
7266exit:
8d8ab6c2
JG
7267 /* Reset the table to what it was when we came in. If we only fetched
7268 * it locally, then it has no meaning outside of flow translation. */
7269 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7270
1520ef4f
BP
7271 ofpbuf_uninit(&ctx.stack);
7272 ofpbuf_uninit(&ctx.action_set);
1d361a81 7273 ofpbuf_uninit(&ctx.frozen_actions);
1520ef4f 7274 ofpbuf_uninit(&scratch_actions);
1fc11c59 7275 ofpbuf_delete(ctx.encap_data);
fff1b9c0
JR
7276
7277 /* Make sure we return a "drop flow" in case of an error. */
7278 if (ctx.error) {
7279 xout->slow = 0;
7280 if (xin->odp_actions) {
7281 ofpbuf_clear(xin->odp_actions);
7282 }
7283 }
7284 return ctx.error;
91d6cd12
AW
7285}
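
/* A minimal caller sketch (illustrative only; the function name is
 * hypothetical and real callers live in the upcall and packet-out paths):
 * translate a flow into datapath actions, treating any error as an
 * installable drop flow, as the comment on xlate_actions() describes. */
static enum xlate_error
translate_flow_sketch(struct ofproto_dpif *ofproto, ovs_version_t version,
                      const struct flow *flow, struct ofpbuf *odp_actions)
{
    struct flow_wildcards wc;
    struct xlate_in xin;
    struct xlate_out xout;
    enum xlate_error error;

    xlate_in_init(&xin, ofproto, version, flow, flow->in_port.ofp_port,
                  NULL, 0, NULL, &wc, odp_actions);
    error = xlate_actions(&xin, &xout);
    /* On error, 'odp_actions' has been cleared, yielding a drop flow. */
    xlate_out_uninit(&xout);
    return error;
}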
7286
77ab5fd2
BP
7287enum ofperr
7288xlate_resume(struct ofproto_dpif *ofproto,
7289 const struct ofputil_packet_in_private *pin,
7290 struct ofpbuf *odp_actions,
7291 enum slow_path_reason *slow)
7292{
7293 struct dp_packet packet;
4d617a87
BP
7294 dp_packet_use_const(&packet, pin->base.packet,
7295 pin->base.packet_len);
77ab5fd2
BP
7296
7297 struct flow flow;
7298 flow_extract(&packet, &flow);
7299
7300 struct xlate_in xin;
1f4a8933
JR
7301 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7302 &flow, 0, NULL, ntohs(flow.tcp_flags),
77ab5fd2
BP
7303 &packet, NULL, odp_actions);
7304
7305 struct ofpact_note noop;
7306 ofpact_init_NOTE(&noop);
7307 noop.length = 0;
7308
7309 bool any_actions = pin->actions_len > 0;
7310 struct frozen_state state = {
7311 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7312 .ofproto_uuid = pin->bridge,
7313 .stack = pin->stack,
84cf3c1f 7314 .stack_size = pin->stack_size,
77ab5fd2
BP
7315 .mirrors = pin->mirrors,
7316 .conntracked = pin->conntracked,
7317
7318 /* When there are no actions, xlate_actions() will search the flow
7319 * table. We don't want it to do that (we want it to resume), so
7320 * supply a no-op action if there aren't any.
7321 *
7322 * (We can't necessarily avoid translating actions entirely if there
7323 * aren't any actions, because there might be some finishing-up to do
7324 * at the end of the pipeline, and we don't check for those
7325 * conditions.) */
7326 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7327 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7328
7329 .action_set = pin->action_set,
7330 .action_set_len = pin->action_set_len,
7331 };
7332 frozen_metadata_from_flow(&state.metadata,
4d617a87 7333 &pin->base.flow_metadata.flow);
77ab5fd2
BP
7334 xin.frozen_state = &state;
7335
7336 struct xlate_out xout;
7337 enum xlate_error error = xlate_actions(&xin, &xout);
7338 *slow = xout.slow;
7339 xlate_out_uninit(&xout);
7340
7341 /* xlate_actions() can generate a number of errors, but only
7342 * XLATE_BRIDGE_NOT_FOUND really stands out to me as one that we should be
7343 * sure to report over OpenFlow. The others could come up in packet-outs
7344 * or regular flow translation and I don't think that it's going to be too
7345 * useful to report them to the controller. */
7346 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7347}
7348
2eb79142
JG
7349/* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
7350 * supports a notion of an OAM flag, sets it if 'oam' is true.
91d6cd12
AW
7351 * May modify 'packet'.
7352 * Returns 0 if successful, otherwise a positive errno value. */
7353int
2eb79142
JG
7354xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7355 struct dp_packet *packet)
91d6cd12 7356{
84f0f298 7357 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
91d6cd12 7358 struct xport *xport;
2eb79142
JG
7359 uint64_t ofpacts_stub[1024 / 8];
7360 struct ofpbuf ofpacts;
91d6cd12 7361 struct flow flow;
91d6cd12 7362
2eb79142 7363 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
91d6cd12 7364 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
cf62fa4c 7365 flow_extract(packet, &flow);
b5e7e61a 7366 flow.in_port.ofp_port = OFPP_NONE;
91d6cd12 7367
84f0f298 7368 xport = xport_lookup(xcfg, ofport);
91d6cd12 7369 if (!xport) {
02ea2703 7370 return EINVAL;
91d6cd12 7371 }
2eb79142
JG
7372
7373 if (oam) {
71f21279
BP
7374 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7375 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7376 &flag, &flag);
2eb79142
JG
7377 }
7378
7379 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
e491a67a 7380
1f4a8933
JR
7381 /* Actions here are not referring to anything versionable (flow tables or
7382 * groups) so we don't need to worry about the version here. */
7383 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7384 OVS_VERSION_MAX, &flow, NULL,
2eb79142 7385 ofpacts.data, ofpacts.size, packet);
9583bc14 7386}
b256dc52 7387
901a517e 7388void
064799a1
JR
7389xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7390 ofp_port_t in_port, struct eth_addr dl_src,
7391 int vlan, bool is_grat_arp)
b256dc52 7392{
84f0f298 7393 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
b256dc52
JS
7394 struct xbridge *xbridge;
7395 struct xbundle *xbundle;
b256dc52 7396
84f0f298 7397 xbridge = xbridge_lookup(xcfg, ofproto);
b256dc52
JS
7398 if (!xbridge) {
7399 return;
7400 }
7401
2d9b49dd 7402 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
b256dc52
JS
7403 if (!xbundle) {
7404 return;
7405 }
7406
2d9b49dd 7407 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
b256dc52 7408}
bef503e8 7409
88186383
AZ
7410void
7411xlate_set_support(const struct ofproto_dpif *ofproto,
7412 const struct dpif_backer_support *support)
7413{
7414 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7415 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7416
7417 if (xbridge) {
7418 xbridge->support = *support;
7419 }
7420}