/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif-trace.h"
#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-ed-props.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-router.h"
#include "packets.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "util.h"
#include "uuid.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation.
 *
 * The goal of limiting the depth of resubmits is to ensure that flow
 * translation eventually terminates.  Only resubmits to the same table or an
 * earlier table count against the maximum depth.  This is because resubmits to
 * strictly monotonically increasing table IDs will eventually terminate, since
 * any OpenFlow switch has a finite number of tables.  OpenFlow tables are most
 * commonly traversed in numerically increasing order, so this limit has little
 * effect on conventionally designed OpenFlow pipelines.
 *
 * Outputs to patch ports and to groups also count against the depth limit. */
#define MAX_DEPTH 64

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)

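/* Illustrative sketch (not part of the original file): both limits above are
 * checked before each nested translation step.  The helper below is
 * hypothetical (the real checks live in the resubmit path further down in
 * this file), but it shows how the two counters interact. */
static inline bool
example_xlate_limits_ok(int depth, int resubmits)
{
    /* 'depth' grows only for steps that could loop (resubmits to the same or
     * an earlier table, patch ports, groups) and shrinks on return;
     * 'resubmits' grows monotonically for every resubmit. */
    return depth < MAX_DEPTH && resubmits < MAX_RESUBMITS;
}
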
/* The structure holds an array of IP addresses assigned to a bridge and the
 * number of elements in the array.  These data are mutable and are evaluated
 * when ARP or Neighbor Advertisement packets received on a native tunnel
 * port are xlated.  So 'ref_cnt' and RCU are used for synchronization. */
struct xbridge_addr {
    struct in6_addr *addr;       /* Array of IP addresses of xbridge. */
    int n_addr;                  /* Number of IP addresses. */
    struct ovs_refcount ref_cnt;
};

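/* Illustrative note (not part of the original file): the address array is
 * never modified in place.  xbridge_addr_create() below builds a fresh
 * refcounted copy whenever the bridge's addresses change, xbridge_addr_ref()
 * pins it for a reader, and xbridge_addr_unref() frees it when the last
 * reference drops. */
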
struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* Datapath feature support. */
    struct dpif_backer_support support;

    struct xbridge_addr *addr;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    uint16_t qinq_ethtype;         /* Ethertype of dot1q-tunnel interface,
                                    * either 0x8100 or 0x88a8. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    unsigned long *cvlans;         /* Bitmap of allowed customer VLANs,
                                    * NULL if all VLANs are allowed. */
    enum port_priority_tags_mode use_priority_tags;
                                   /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
    bool protected;                /* Protected port mode. */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    struct hmap_node uuid_node;      /* Node in global 'xports_uuid' map. */
    struct uuid uuid;                /* Key in global 'xports_uuid' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */
    enum netdev_pt_mode pt_mode;     /* packet_type handling. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct xlate_cfg *xcfg;
    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    struct in6_addr orig_tunnel_ipv6_dst;

    /* Stack for the push and pop actions.  See comment above nx_stack_push()
     * in nx-match.c for info on how the stack is stored. */
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Flow translation populates this with wildcards relevant in translation.
     * When 'xin->wc' is nonnull, this is the same pointer.  When 'xin->wc' is
     * null, this is a pointer to a temporary buffer. */
    struct flow_wildcards *wc;

    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
     * this is the same pointer.  When 'xin->odp_actions' is null, this points
     * to a scratch ofpbuf.  This allows code to add actions to
     * 'ctx->odp_actions' without worrying about whether the caller really
     * wants actions. */
    struct ofpbuf *odp_actions;

    /* Statistics maintained by xlate_table_action().
     *
     * These statistics limit the amount of work that a single flow
     * translation can perform.  The goal of the first of these, 'depth', is
     * primarily to prevent translation from performing an infinite amount of
     * work.  It counts the current depth of nested "resubmit"s (and a few
     * other activities); when a resubmit returns, it decreases.  Resubmits to
     * tables in strictly monotonically increasing order don't contribute to
     * 'depth' because they cannot cause a flow translation to take an
     * infinite amount of time (because the number of tables is finite).
     * Translation aborts when 'depth' exceeds MAX_DEPTH.
     *
     * 'resubmits', on the other hand, prevents flow translation from
     * performing an extraordinarily large (though still finite) amount of
     * work.  It counts the total number of resubmits (and a few other
     * activities) that have been executed.  Returning from a resubmit does
     * not affect this counter.  Thus, this limits the amount of work that a
     * particular translation can perform.  Translation aborts when
     * 'resubmits' exceeds MAX_RESUBMITS (which is much larger than
     * MAX_DEPTH). */
    int depth;                  /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_action_set;         /* Currently translating action_set, if true. */
    bool in_packet_out;         /* Currently translating a packet_out msg, if
                                 * true. */
    bool pending_encap;         /* True when waiting to commit a pending
                                 * encap action. */
    bool pending_decap;         /* True when waiting to commit a pending
                                 * decap action. */
    struct ofpbuf *encap_data;  /* May contain a pointer to an ofpbuf with
                                 * context for the datapath encap action. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
    bool exit;                  /* No further actions should be processed. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
    int mirror_snaplen;         /* Max size of a mirror packet in bytes. */

    /* Freezing Translation
     * ====================
     *
     * At some point during translation, the code may recognize the need to
     * halt and checkpoint the translation in a way that it can be restarted
     * again later.  We call the checkpointing process "freezing" and the
     * restarting process "thawing".
     *
     * The use cases for freezing are:
     *
     *     - "Recirculation", where the translation process discovers that it
     *       doesn't have enough information to complete translation without
     *       actually executing the actions that have already been translated,
     *       which provides the additionally needed information.  In these
     *       situations, translation freezes and assigns the frozen data a
     *       unique "recirculation ID", which it associates with the data in a
     *       table in userspace (see ofproto-dpif-rid.h).  It also adds an
     *       OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
     *       actions.  When a packet hits that action, the datapath looks its
     *       flow up again using the ID.  If there's a miss, it comes back to
     *       userspace, which finds the recirculation table entry for the ID,
     *       thaws the associated frozen data, and continues translation from
     *       that point given the additional information that is now known.
     *
     *       The archetypal example is MPLS.  As MPLS is implemented in
     *       OpenFlow, the protocol that follows the last MPLS label becomes
     *       known only when that label is popped by an OpenFlow action.  That
     *       means that Open vSwitch can't extract the headers beyond the MPLS
     *       labels until the pop action is executed.  Thus, at that point
     *       translation uses the recirculation process to extract the headers
     *       beyond the MPLS labels.
     *
     *       (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
     *       output to bonds.  OVS pre-populates all the datapath flows for
     *       bond output in the datapath, though, which means that the
     *       elaborate process of coming back to userspace for a second round
     *       of translation isn't needed, and so bonds don't follow the above
     *       process.)
     *
     *     - "Continuation".  A continuation is a way for an OpenFlow
     *       controller to interpose on a packet's traversal of the OpenFlow
     *       tables.  When the translation process encounters a "controller"
     *       action with the "pause" flag, it freezes translation, serializes
     *       the frozen data, and sends it to an OpenFlow controller.  The
     *       controller then examines and possibly modifies the frozen data
     *       and eventually sends it back to the switch, which thaws it and
     *       continues translation.
     *
     * The main problem of freezing translation is preserving state, so that
     * when the translation is thawed later it resumes from where it left off,
     * without disruption.  In particular, actions must be preserved as
     * follows:
     *
     *     - If we're freezing because an action needed more information, the
     *       action that prompted it.
     *
     *     - Any actions remaining to be translated within the current flow.
     *
     *     - If translation was frozen within a NXAST_RESUBMIT, then any
     *       actions following the resubmit action.  Resubmit actions can be
     *       nested, so this has to go all the way up the control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
     *
     *     - The table ID and cookie of the flow being translated at each
     *       level of the control stack, because these can become visible
     *       through OFPAT_CONTROLLER actions (and other ways).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need to freeze translation is identified, the
     * translation process:
     *
     * 1. Sets 'freezing' to true.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
     *    frozen_actions.header to the action to make it easy to find it
     *    later.  This action holds the current table ID and cookie so that
     *    they can be restored during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions
     *    following it within the same flow to 'frozen_actions', so that they
     *    can be executed during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these
     *    notices that we're exiting and freezing and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
     *    followed by any actions that were yet unprocessed.
     *
     * If we're freezing because of recirculation, the caller generates a
     * recirculation ID and associates all the state produced by this process
     * with it.  For post-recirculation upcall translation, the caller passes
     * it back in for the new translation to execute.  The process yielded a
     * set of ofpacts that can be translated directly, so it is not much of a
     * special case at that point. */
    bool freezing;
    bool recirc_update_dp_hash;    /* Generated recirculation will be preceded
                                    * by datapath HASH action to get an updated
                                    * dp_hash after recirculation. */
    uint32_t dp_hash_alg;
    uint32_t dp_hash_basis;
    struct ofpbuf frozen_actions;
    const struct ofpact_controller *pause;

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an
     * action or looking up a flow requires access to the fields of the packet
     * after the MPLS label stack that was originally present. */
    bool was_mpls;

    /* True if conntrack has been performed on this packet during processing
     * on the current bridge.  This is used to determine whether conntrack
     * state from the datapath should be honored after thawing. */
    bool conntracked;

    /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
    struct ofpact_nat *ct_nat_action;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */

    enum xlate_error error;     /* Translation failed. */
};

/* Structure to track VLAN manipulation. */
struct xvlan_single {
    uint16_t tpid;
    uint16_t vid;
    uint16_t pcp;
};

struct xvlan {
    struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
};

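/* Illustrative sketch (not part of the original file): a double-tagged
 * (QinQ) frame, outer service tag 0x88a8 with VID 100 and inner customer
 * tag 0x8100 with VID 200, extracts (via xvlan_extract() below) to:
 *
 *     struct xvlan x = {
 *         .v[0] = { .tpid = 0x88a8, .vid = 100, .pcp = 0 },
 *         .v[1] = { .tpid = 0x8100, .vid = 200, .pcp = 0 },
 *     };
 *
 * with v[0] always holding the outermost tag. */
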
const char *
xlate_strerror(enum xlate_error error)
{
    switch (error) {
    case XLATE_OK:
        return "OK";
    case XLATE_BRIDGE_NOT_FOUND:
        return "Bridge not found";
    case XLATE_RECURSION_TOO_DEEP:
        return "Recursion too deep";
    case XLATE_TOO_MANY_RESUBMITS:
        return "Too many resubmits";
    case XLATE_STACK_TOO_DEEP:
        return "Stack too deep";
    case XLATE_NO_RECIRCULATION_CONTEXT:
        return "No recirculation context";
    case XLATE_RECIRCULATION_CONFLICT:
        return "Recirculation conflict";
    case XLATE_TOO_MANY_MPLS_LABELS:
        return "Too many MPLS labels";
    case XLATE_INVALID_TUNNEL_METADATA:
        return "Invalid tunnel metadata";
    case XLATE_UNSUPPORTED_PACKET_TYPE:
        return "Unsupported packet type";
    case XLATE_CONGESTION_DROP:
        return "Congestion Drop";
    case XLATE_FORWARDING_DISABLED:
        return "Forwarding is disabled";
    case XLATE_MAX:
        break;
    }
    return "Unknown error";
}

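/* Illustrative usage (not part of the original file), assuming a caller that
 * runs a full translation and logs any failure:
 *
 *     enum xlate_error error = xlate_actions(&xin, &xout);
 *     if (error != XLATE_OK) {
 *         VLOG_WARN("translation failed (%s)", xlate_strerror(error));
 *     }
 */
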
static void xlate_action_set(struct xlate_ctx *ctx);
static void xlate_commit_actions(struct xlate_ctx *ctx);

static void
patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
                  struct xport *out_dev);

static void
ctx_trigger_freeze(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->freezing = true;
}

static void
ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
                                  uint32_t basis)
{
    ctx->exit = true;
    ctx->freezing = true;
    ctx->recirc_update_dp_hash = true;
    ctx->dp_hash_alg = type;
    ctx->dp_hash_basis = basis;
}

static bool
ctx_first_frozen_action(const struct xlate_ctx *ctx)
{
    return !ctx->frozen_actions.size;
}

static void
ctx_cancel_freeze(struct xlate_ctx *ctx)
{
    if (ctx->freezing) {
        ctx->freezing = false;
        ctx->recirc_update_dp_hash = false;
        ofpbuf_clear(&ctx->frozen_actions);
        ctx->frozen_actions.header = NULL;
        ctx->pause = NULL;
    }
}

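/* Illustrative sketch (not part of the original file): how the helpers above
 * are typically driven.  'needs_recirculation' is a hypothetical stand-in
 * for the real triggers (an MPLS pop, a "controller" action with "pause",
 * a bond hash, ...):
 *
 *     if (needs_recirculation) {
 *         ctx_trigger_freeze(ctx);
 *         return;
 *     }
 *
 * ctx_trigger_freeze() also sets 'exit', so unwinding resubmit frames stop
 * translating and instead append their leftover actions to 'frozen_actions'.
 * A path that later finds the checkpoint unnecessary rolls it back with
 * ctx_cancel_freeze(). */
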
static void finish_freezing(struct xlate_ctx *ctx);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the DSCP bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

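/* Illustrative note (not part of the original file): 'dscp' is stored
 * pre-shifted into the upper six bits of the IP ToS byte.  For example, a
 * queue configured with DSCP 46 (Expedited Forwarding) is stored as
 * (46 << 2) & IP_DSCP_MASK == 0xb8; see the qdscp loop in xlate_ofport_set()
 * below. */
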
/* Xlate config contains hash maps of all bridges, bundles and ports.
 * 'xcfgp' contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
    struct hmap xports_uuid;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;

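/* Illustrative sketch (not part of the original file): the reader side of
 * this copy-on-write scheme.  Handler and revalidator threads fetch the
 * current configuration through RCU, so the main thread can edit 'new_xcfg'
 * without blocking them:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * See xlate_txn_start() and xlate_txn_commit() below for the writer side. */
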
typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
                                   struct xlate_ctx *, bool, bool);
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *, bool, bool);
static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                                struct xlate_ctx *, bool, bool);
static void xlate_normal(struct xlate_ctx *);
static void xlate_normal_flood(struct xlate_ctx *ct,
                               struct xbundle *in_xbundle, struct xvlan *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss, bool with_ct_orig,
                               bool is_last_action, xlate_actions_handler *);

static bool input_vid_is_valid(const struct xlate_ctx *,
                               uint16_t vid, struct xbundle *);
static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
static void xvlan_pop(struct xvlan *src);
static void xvlan_push_uninit(struct xvlan *src);
static void xvlan_extract(const struct flow *, struct xvlan *);
static void xvlan_put(struct flow *, const struct xvlan *,
                      enum port_priority_tags_mode);
static void xvlan_input_translate(const struct xbundle *,
                                  const struct xvlan *in,
                                  struct xvlan *xvlan);
static void xvlan_output_translate(const struct xbundle *,
                                   const struct xvlan *xvlan,
                                   struct xvlan *out);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          const struct xvlan *);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t hash_alg;    /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr,
                                  bool is_last_action, bool truncate);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
                                              const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
                                          const struct uuid *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              const struct dpif_backer_support *,
                              const struct xbridge_addr *);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode,
                              uint16_t qinq_ethtype, int vlan,
                              unsigned long *trunks, unsigned long *cvlans,
                              enum port_priority_tags_mode,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable, bool protected);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

/* Tracing helpers. */

/* If tracing is enabled in 'ctx', creates a new trace node and appends it to
 * the list of nodes maintained in ctx->xin.  The new node has type 'type' and
 * its text is created from 'format' by treating it as a printf format string.
 * Returns the list of nodes embedded within the new trace node; ordinarily,
 * the caller can ignore this, but it is useful if the caller needs to nest
 * more trace nodes within the new node.
 *
 * If tracing is not enabled, does nothing and returns NULL. */
static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
             const char *format, ...)
{
    struct ovs_list *subtrace = NULL;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        va_list args;
        va_start(args, format);
        char *text = xvasprintf(format, args);
        subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
        va_end(args);
        free(text);
    }
    return subtrace;
}

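/* Illustrative usage (not part of the original file): callers pass an OFT_*
 * node type and printf-style arguments, e.g.
 *
 *     xlate_report(ctx, OFT_DETAIL, "resubmit to table %"PRIu8, table_id);
 *
 * and can nest further nodes under the returned list when tracing a
 * subsidiary activity. */
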
/* This is like xlate_report() for errors that are serious enough that we
 * should log them even if we are not tracing. */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
    } else {
        ds_put_format(&s, " on bridge %s while processing ",
                      ctx->xbridge->name);
        flow_format(&s, &ctx->base_flow, NULL);
        VLOG_WARN("%s", ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* This is like xlate_report() for messages that should be logged
 * at the info level (even when not tracing). */
static void OVS_PRINTF_FORMAT(2, 3)
xlate_report_info(const struct xlate_ctx *ctx, const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_INFO(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, OFT_WARN, ds_cstr(&s));
    } else {
        ds_put_format(&s, " on bridge %s while processing ",
                      ctx->xbridge->name);
        flow_format(&s, &ctx->base_flow, NULL);
        VLOG_INFO("%s", ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* This is like xlate_report() for messages that should be logged at debug
 * level (even if we are not tracing) because they can be valuable for
 * debugging. */
static void OVS_PRINTF_FORMAT(3, 4)
xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                   const char *format, ...)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
    if (!OVS_UNLIKELY(ctx->xin->trace)
        && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    va_list args;
    va_start(args, format);
    ds_put_format_valist(&s, format, args);
    va_end(args);

    if (ctx->xin->trace) {
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
    } else {
        VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
    }
    ds_destroy(&s);
}

/* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
 * trace, whose text is 'title' followed by a formatted version of the
 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
                     const char *title,
                     const struct ofpact *ofpacts, size_t ofpacts_len)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        ds_put_format(&s, "%s: ", title);
        struct ofpact_format_params fp = { .s = &s };
        ofpacts_format(ofpacts, ofpacts_len, &fp);
        oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
        ds_destroy(&s);
    }
}

/* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
 * trace, whose text is a formatted version of the OpenFlow action set.
 * 'verb' should be "was" or "is", depending on whether the action set
 * reported is the new action set or the old one.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ofpbuf action_list;
        ofpbuf_init(&action_list, 0);
        ofpacts_execute_action_set(&action_list, &ctx->action_set);
        if (action_list.size) {
            struct ds s = DS_EMPTY_INITIALIZER;
            struct ofpact_format_params fp = { .s = &s };
            ofpacts_format(action_list.data, action_list.size, &fp);
            xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
                         verb, ds_cstr(&s));
            ds_destroy(&s);
        } else {
            xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
        }
        ofpbuf_uninit(&action_list);
    }
}


/* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
 * OpenFlow table 'table_id') to the trace and makes this node the parent for
 * future trace nodes.  The caller should save ctx->xin->trace before calling
 * this function, then after tracing all of the activities under the table,
 * restore its previous value.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
                   uint8_t table_id)
{
    if (OVS_LIKELY(!ctx->xin->trace)) {
        return;
    }

    struct ds s = DS_EMPTY_INITIALIZER;
    ds_put_format(&s, "%2d. ", table_id);
    if (rule == ctx->xin->ofproto->miss_rule) {
        ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
    } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
        ds_put_cstr(&s, "No match.");
    } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
        ds_put_cstr(&s, "Packets are IP fragments and "
                    "the fragment handling mode is \"drop\".");
    } else {
        minimatch_format(&rule->up.cr.match,
                         ofproto_get_tun_tab(&ctx->xin->ofproto->up),
                         NULL, &s, OFP_DEFAULT_PRIORITY);
        if (ds_last(&s) != ' ') {
            ds_put_cstr(&s, ", ");
        }
        ds_put_format(&s, "priority %d", rule->up.cr.priority);
        if (rule->up.flow_cookie) {
            ds_put_format(&s, ", cookie %#"PRIx64,
                          ntohll(rule->up.flow_cookie));
        }
    }
    ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
                                      ds_cstr(&s))->subs;
    ds_destroy(&s);
}

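/* Illustrative note (not part of the original file): for an ordinary matched
 * rule this produces `ofproto/trace`-style table lines such as
 *
 *      0. in_port=1, priority 32768
 *
 * while the special miss, no-packet-in, and drop-frags rules get the fixed
 * texts above instead of a formatted match. */
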
/* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
 * reporting the value of subfield 'sf'.
 *
 * If tracing is not enabled, does nothing. */
static void
xlate_report_subfield(const struct xlate_ctx *ctx,
                      const struct mf_subfield *sf)
{
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        struct ds s = DS_EMPTY_INITIALIZER;
        mf_format_subfield(sf, &s);
        ds_put_cstr(&s, " is now ");

        if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
            union mf_value value;
            mf_get_value(sf->field, &ctx->xin->flow, &value);
            mf_format(sf->field, &value, NULL, NULL, &s);
        } else {
            union mf_subvalue cst;
            mf_read_subfield(sf, &ctx->xin->flow, &cst);
            ds_put_hex(&s, &cst, sizeof cst);
        }

        xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));

        ds_destroy(&s);
    }
}

static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    ovs_list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    ovs_list_init(&xbundle->xports);
    ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
    hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
                uuid_hash(&xport->uuid));
}

static struct xbridge_addr *
xbridge_addr_create(struct xbridge *xbridge)
{
    struct xbridge_addr *xbridge_addr = xbridge->addr;
    struct in6_addr *addr = NULL, *mask = NULL;
    struct netdev *dev;
    int err, n_addr = 0;

    err = netdev_open(xbridge->name, NULL, &dev);
    if (!err) {
        err = netdev_get_addr_list(dev, &addr, &mask, &n_addr);
        if (!err) {
            if (!xbridge->addr ||
                n_addr != xbridge->addr->n_addr ||
                (xbridge->addr->addr && memcmp(addr, xbridge->addr->addr,
                                               sizeof(*addr) * n_addr))) {
                xbridge_addr = xzalloc(sizeof *xbridge_addr);
                xbridge_addr->addr = addr;
                xbridge_addr->n_addr = n_addr;
                ovs_refcount_init(&xbridge_addr->ref_cnt);
            } else {
                free(addr);
            }
            free(mask);
        }
        netdev_close(dev);
    }

    return xbridge_addr;
}

static struct xbridge_addr *
xbridge_addr_ref(const struct xbridge_addr *addr_)
{
    struct xbridge_addr *addr = CONST_CAST(struct xbridge_addr *, addr_);
    if (addr) {
        ovs_refcount_ref(&addr->ref_cnt);
    }
    return addr;
}

static void
xbridge_addr_unref(struct xbridge_addr *addr)
{
    if (addr && ovs_refcount_unref_relaxed(&addr->ref_cnt) == 1) {
        free(addr->addr);
        free(addr);
    }
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support,
                  const struct xbridge_addr *addr)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    if (xbridge->addr != addr) {
        xbridge_addr_unref(xbridge->addr);
        xbridge->addr = xbridge_addr_ref(addr);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->support = *support;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
                  int vlan, unsigned long *trunks, unsigned long *cvlans,
                  enum port_priority_tags_mode use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable, bool protected)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->qinq_ethtype = qinq_ethtype;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->cvlans = cvlans;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;
    xbundle->protected = protected;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp,
                int stp_port_no, const struct rstp_port *rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->pt_mode = netdev_get_pt_mode(netdev);
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu, xbridge->has_in_band,
                      &xbridge->support, xbridge->addr);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
                      xbundle->vlan, xbundle->trunks, xbundle->cvlans,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable, xbundle->protected);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    new_xport->uuid = xport->uuid;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit();
 *
 * The ovsrcu_synchronize() call here also ensures that the upcall threads
 * retain no references to anything in the previous configuration.
 */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);
    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);
    hmap_init(&new_xcfg->xports_uuid);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}


static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    hmap_destroy(&xcfg->xports_uuid);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  const struct dpif_backer_support *support)
{
    struct xbridge *xbridge;
    struct xbridge_addr *xbridge_addr, *old_addr;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xbridge_addr = xbridge_addr_create(xbridge);
    old_addr = xbridge->addr;

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, support,
                      xbridge_addr);

    if (xbridge_addr != old_addr) {
        xbridge_addr_unref(xbridge_addr);
    }
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    netflow_unref(xbridge->netflow);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    xbridge_addr_unref(xbridge->addr);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode,
                 uint16_t qinq_ethtype, int vlan,
                 unsigned long *trunks, unsigned long *cvlans,
                 enum port_priority_tags_mode use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable, bool protected)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
                      use_priority_tags, bond, lacp, floodable, protected);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    ovs_list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 const struct lldp *lldp, struct ofport_dpif *peer,
                 int stp_port_no, const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;
        uuid_generate(&xport->uuid);

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
                    stp_port_no, rstp_port, config, state, is_tunnel,
                    may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        ovs_list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    lldp_unref(xport->lldp);
    free(xport);
}

84f0f298
RW
1486void
1487xlate_ofport_remove(struct ofport_dpif *ofport)
1488{
1489 struct xport *xport;
1490
1491 ovs_assert(new_xcfg);
1492
1493 xport = xport_lookup(new_xcfg, ofport);
8416e50f
VD
1494 if (xport) {
1495 tnl_neigh_flush(netdev_get_name(xport->netdev));
1496 }
84f0f298
RW
1497 xlate_xport_remove(new_xcfg, xport);
1498}
1499
ef377a58 1500static struct ofproto_dpif *
d40533fc
BP
1501xlate_lookup_ofproto_(const struct dpif_backer *backer,
1502 const struct flow *flow,
1503 ofp_port_t *ofp_in_port, const struct xport **xportp,
1504 char **errorp)
ef377a58 1505{
e672ff9b 1506 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
ef377a58 1507 const struct xport *xport;
f9038ef6 1508
00135b86
ZB
1509 /* If packet is recirculated, xport can be retrieved from frozen state. */
1510 if (flow->recirc_id) {
1511 const struct recirc_id_node *recirc_id_node;
1512
1513 recirc_id_node = recirc_id_node_find(flow->recirc_id);
1514
1515 if (OVS_UNLIKELY(!recirc_id_node)) {
d40533fc
BP
1516 if (errorp) {
1517 *errorp = xasprintf("no recirculation data for recirc_id "
1518 "%"PRIu32, flow->recirc_id);
1519 }
00135b86
ZB
1520 return NULL;
1521 }
1522
1523 /* If recirculation was initiated due to bond (in_port = OFPP_NONE)
1524 * then frozen state is static and xport_uuid is not defined, so xport
1525 * cannot be restored from frozen state. */
1526 if (recirc_id_node->state.metadata.in_port != OFPP_NONE) {
1527 struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
1528 xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
1529 if (xport && xport->xbridge && xport->xbridge->ofproto) {
1530 goto out;
1531 }
1532 }
1533 }
1534
e672ff9b
JR
1535 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1536 ? tnl_port_receive(flow)
1537 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1538 if (OVS_UNLIKELY(!xport)) {
d40533fc
BP
1539 if (errorp) {
1540 *errorp = (tnl_port_should_receive(flow)
1541 ? xstrdup("no OpenFlow tunnel port for this packet")
1542 : xasprintf("no OpenFlow tunnel port for datapath "
1543 "port %"PRIu32, flow->in_port.odp_port));
1544 }
e672ff9b 1545 return NULL;
ef377a58 1546 }
00135b86
ZB
1547
1548out:
d40533fc
BP
1549 if (errorp) {
1550 *errorp = NULL;
1551 }
e672ff9b 1552 *xportp = xport;
f9038ef6 1553 if (ofp_in_port) {
e672ff9b 1554 *ofp_in_port = xport->ofp_port;
f9038ef6 1555 }
e672ff9b 1556 return xport->xbridge->ofproto;
ef377a58
JR
1557}
1558
1559/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1560 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1561struct ofproto_dpif *
1562xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
d40533fc 1563 ofp_port_t *ofp_in_port, char **errorp)
ef377a58
JR
1564{
1565 const struct xport *xport;
1566
d40533fc 1567 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, errorp);
ef377a58
JR
1568}
1569
cc377352 1570/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
bfc691bb 1571 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
cc377352 1572 * OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
dcc2c6cd
JR
1573 * handles for those protocols if they're enabled. Caller may use the returned
1574 * pointers until quiescing; for longer-term use, additional references must
1575 * be taken.
8449c4d6 1576 *
f9038ef6 1577 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
ef377a58 1578 */
8449c4d6 1579int
5c476ea3
JR
1580xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1581 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1582 struct dpif_sflow **sflow, struct netflow **netflow,
1583 ofp_port_t *ofp_in_port)
8449c4d6 1584{
ef377a58 1585 struct ofproto_dpif *ofproto;
84f0f298 1586 const struct xport *xport;
8449c4d6 1587
d40533fc 1588 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, NULL);
8449c4d6 1589
f9038ef6 1590 if (!ofproto) {
cc377352 1591 return ENODEV;
8449c4d6 1592 }
8449c4d6 1593
ef377a58
JR
1594 if (ofprotop) {
1595 *ofprotop = ofproto;
8449c4d6
EJ
1596 }
1597
1dfdb9b3 1598 if (ipfix) {
f9038ef6 1599 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1dfdb9b3
EJ
1600 }
1601
1602 if (sflow) {
f9038ef6 1603 *sflow = xport ? xport->xbridge->sflow : NULL;
1dfdb9b3
EJ
1604 }
1605
1606 if (netflow) {
f9038ef6 1607 *netflow = xport ? xport->xbridge->netflow : NULL;
1dfdb9b3 1608 }
f9038ef6 1609
cc377352 1610 return 0;
8449c4d6
EJ
1611}
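/* Illustrative sketch (not part of the original file): one way a caller,
 * e.g. an upcall handler, might use xlate_lookup() above. The function
 * name example_resolve_upcall() is hypothetical. */
static int
example_resolve_upcall(const struct dpif_backer *backer,
                       const struct flow *flow)
{
    struct ofproto_dpif *ofproto;
    struct dpif_sflow *sflow;
    ofp_port_t ofp_in_port;
    int error;

    /* Not interested in IPFIX or NetFlow here, so pass NULL for those. */
    error = xlate_lookup(backer, flow, &ofproto, NULL, &sflow, NULL,
                         &ofp_in_port);
    if (error) {
        return error;            /* ENODEV: no associated ofproto. */
    }

    /* 'ofproto', 'ofp_in_port' and (if nonnull) 'sflow' may be used until
     * the next quiescent point; take references for longer-term use. */
    return 0;
}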
1612
46c88433 1613static struct xbridge *
84f0f298 1614xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
46c88433 1615{
84f0f298 1616 struct hmap *xbridges;
46c88433
EJ
1617 struct xbridge *xbridge;
1618
84f0f298 1619 if (!ofproto || !xcfg) {
5e6af486
EJ
1620 return NULL;
1621 }
1622
84f0f298
RW
1623 xbridges = &xcfg->xbridges;
1624
46c88433 1625 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
84f0f298 1626 xbridges) {
46c88433
EJ
1627 if (xbridge->ofproto == ofproto) {
1628 return xbridge;
1629 }
1630 }
1631 return NULL;
1632}
1633
290835f9
BP
1634static struct xbridge *
1635xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1636{
1637 struct xbridge *xbridge;
1638
1639 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
07a3cd5c 1640 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
290835f9
BP
1641 return xbridge;
1642 }
1643 }
1644 return NULL;
1645}
1646
46c88433 1647static struct xbundle *
84f0f298 1648xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
46c88433 1649{
84f0f298 1650 struct hmap *xbundles;
46c88433
EJ
1651 struct xbundle *xbundle;
1652
84f0f298 1653 if (!ofbundle || !xcfg) {
5e6af486
EJ
1654 return NULL;
1655 }
1656
84f0f298
RW
1657 xbundles = &xcfg->xbundles;
1658
46c88433 1659 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
84f0f298 1660 xbundles) {
46c88433
EJ
1661 if (xbundle->ofbundle == ofbundle) {
1662 return xbundle;
1663 }
1664 }
1665 return NULL;
1666}
1667
1668static struct xport *
84f0f298 1669xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
46c88433 1670{
84f0f298 1671 struct hmap *xports;
46c88433
EJ
1672 struct xport *xport;
1673
84f0f298 1674 if (!ofport || !xcfg) {
5e6af486
EJ
1675 return NULL;
1676 }
1677
84f0f298
RW
1678 xports = &xcfg->xports;
1679
46c88433 1680 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
84f0f298 1681 xports) {
46c88433
EJ
1682 if (xport->ofport == ofport) {
1683 return xport;
1684 }
1685 }
1686 return NULL;
1687}
1688
00135b86
ZB
1689static struct xport *
1690xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1691{
1692 struct hmap *xports;
1693 struct xport *xport;
1694
1695 if (uuid_is_zero(uuid) || !xcfg) {
1696 return NULL;
1697 }
1698
1699 xports = &xcfg->xports_uuid;
1700
1701 HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
1702 if (uuid_equals(&xport->uuid, uuid)) {
1703 return xport;
1704 }
1705 }
1706 return NULL;
1707}
1708
40085e56
EJ
1709static struct stp_port *
1710xport_get_stp_port(const struct xport *xport)
1711{
92cf817b 1712 return xport->xbridge->stp && xport->stp_port_no != -1
40085e56
EJ
1713 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1714 : NULL;
1715}
9d189a50 1716
0d1cee12 1717static bool
9d189a50
EJ
1718xport_stp_learn_state(const struct xport *xport)
1719{
40085e56 1720 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1721 return sp
1722 ? stp_learn_in_state(stp_port_get_state(sp))
1723 : true;
9d189a50
EJ
1724}
1725
1726static bool
1727xport_stp_forward_state(const struct xport *xport)
1728{
40085e56 1729 struct stp_port *sp = xport_get_stp_port(xport);
4b5f1996
DV
1730 return sp
1731 ? stp_forward_in_state(stp_port_get_state(sp))
1732 : true;
9d189a50
EJ
1733}
1734
0d1cee12 1735static bool
bacdb85a 1736xport_stp_should_forward_bpdu(const struct xport *xport)
0d1cee12
K
1737{
1738 struct stp_port *sp = xport_get_stp_port(xport);
bacdb85a 1739 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
0d1cee12
K
1740}
1741
9d189a50
EJ
1742/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1743 * were used to make the determination. */
1744static bool
1745stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1746{
bbbca389 1747 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
9d189a50 1748 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
bbbca389 1749 return is_stp(flow);
9d189a50
EJ
1750}
1751
1752static void
cf62fa4c 1753stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9d189a50 1754{
40085e56 1755 struct stp_port *sp = xport_get_stp_port(xport);
cf62fa4c
PS
1756 struct dp_packet payload = *packet;
1757 struct eth_header *eth = dp_packet_data(&payload);
9d189a50
EJ
1758
1759 /* Sink packets on ports that have STP disabled when the bridge has
1760 * STP enabled. */
1761 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1762 return;
1763 }
1764
1765 /* Trim off padding on payload. */
cf62fa4c
PS
1766 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1767 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9d189a50
EJ
1768 }
1769
cf62fa4c
PS
1770 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1771 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
9d189a50
EJ
1772 }
1773}
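/* Note on the trim above: STP BPDUs are 802.3/LLC frames, so eth_type
 * holds the payload length rather than an Ethertype. Anything beyond
 * eth_type + ETH_HEADER_LEN octets is padding added to reach the
 * 64-byte Ethernet minimum and can safely be discarded before the BPDU
 * is handed to the STP module. */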
1774
f025bcb7
JR
1775static enum rstp_state
1776xport_get_rstp_port_state(const struct xport *xport)
9efd308e 1777{
f025bcb7
JR
1778 return xport->rstp_port
1779 ? rstp_port_get_state(xport->rstp_port)
1780 : RSTP_DISABLED;
9efd308e
DV
1781}
1782
1783static bool
1784xport_rstp_learn_state(const struct xport *xport)
1785{
4b5f1996
DV
1786 return xport->xbridge->rstp && xport->rstp_port
1787 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1788 : true;
9efd308e
DV
1789}
1790
1791static bool
1792xport_rstp_forward_state(const struct xport *xport)
1793{
4b5f1996
DV
1794 return xport->xbridge->rstp && xport->rstp_port
1795 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1796 : true;
9efd308e
DV
1797}
1798
1799static bool
1800xport_rstp_should_manage_bpdu(const struct xport *xport)
1801{
f025bcb7 1802 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
9efd308e
DV
1803}
1804
1805static void
cf62fa4c 1806rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
9efd308e 1807{
cf62fa4c
PS
1808 struct dp_packet payload = *packet;
1809 struct eth_header *eth = dp_packet_data(&payload);
9efd308e 1810
f025bcb7
JR
1811 /* Sink packets on ports that have no RSTP. */
1812 if (!xport->rstp_port) {
9efd308e
DV
1813 return;
1814 }
1815
1816 /* Trim off padding on payload. */
cf62fa4c
PS
1817 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1818 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
9efd308e
DV
1819 }
1820
f2f78d5c
MM
1821 int len = ETH_HEADER_LEN + LLC_HEADER_LEN;
1822 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1823 len += VLAN_HEADER_LEN;
1824 }
1825 if (dp_packet_try_pull(&payload, len)) {
cf62fa4c
PS
1826 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1827 dp_packet_size(&payload));
9efd308e
DV
1828 }
1829}
1830
46c88433
EJ
1831static struct xport *
1832get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1833{
1834 struct xport *xport;
1835
1836 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1837 &xbridge->xports) {
1838 if (xport->ofp_port == ofp_port) {
1839 return xport;
1840 }
1841 }
1842 return NULL;
1843}
1844
1845static odp_port_t
1846ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1847{
1848 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1849 return xport ? xport->odp_port : ODPP_NONE;
1850}
1851
dd8cd4b4
SH
1852static bool
1853odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1854{
086fa873
BP
1855 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1856 return xport && xport->may_enable;
dd8cd4b4
SH
1857}
1858
1e684d7d 1859static struct ofputil_bucket *
dd8cd4b4
SH
1860group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1861 int depth);
1862
1863static bool
1864group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1865{
1866 struct group_dpif *group;
dd8cd4b4 1867
5d08a275 1868 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 1869 ctx->xin->tables_version, false);
db88b35c 1870 if (group) {
76973237 1871 return group_first_live_bucket(ctx, group, depth) != NULL;
dc25893e 1872 }
dd8cd4b4 1873
dc25893e 1874 return false;
dd8cd4b4
SH
1875}
1876
1877#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1878
1879static bool
1880bucket_is_alive(const struct xlate_ctx *ctx,
1e684d7d 1881 struct ofputil_bucket *bucket, int depth)
dd8cd4b4
SH
1882{
1883 if (depth >= MAX_LIVENESS_RECURSION) {
2d9b49dd
BP
1884 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1885 MAX_LIVENESS_RECURSION);
dd8cd4b4
SH
1886 return false;
1887 }
1888
fdb1999b
AZ
1889 return (!ofputil_bucket_has_liveness(bucket)
1890 || (bucket->watch_port != OFPP_ANY
1891 && odp_port_is_alive(ctx, bucket->watch_port))
1892 || (bucket->watch_group != OFPG_ANY
1893 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
dd8cd4b4
SH
1894}
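/* Example (illustrative): in a fast-failover group, a bucket with
 * watch_port=1 is live as long as port 1 reports may_enable; a bucket
 * with watch_group=G instead chains to group G's own liveness, which is
 * why bucket_is_alive() carries a depth counter capped at
 * MAX_LIVENESS_RECURSION. */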
1895
fac4786a
BP
1896static void
1897xlate_report_bucket_not_live(const struct xlate_ctx *ctx,
1898 const struct ofputil_bucket *bucket)
1899{
1900 if (OVS_UNLIKELY(ctx->xin->trace)) {
1901 struct ds s = DS_EMPTY_INITIALIZER;
1902 if (bucket->watch_port != OFPP_ANY) {
1903 ds_put_cstr(&s, "port ");
1904 ofputil_format_port(bucket->watch_port, NULL, &s);
1905 }
1906 if (bucket->watch_group != OFPG_ANY) {
1907 if (s.length) {
1908 ds_put_cstr(&s, " and ");
1909 }
1910 ds_put_format(&s, "port %"PRIu32, bucket->watch_group);
1911 }
1912
1913 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": not live due to %s",
1914 bucket->bucket_id, ds_cstr(&s));
1915
1916 ds_destroy(&s);
1917 }
1918}
1919
1e684d7d 1920static struct ofputil_bucket *
dd8cd4b4
SH
1921group_first_live_bucket(const struct xlate_ctx *ctx,
1922 const struct group_dpif *group, int depth)
1923{
1924 struct ofputil_bucket *bucket;
07a3cd5c 1925 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
dd8cd4b4
SH
1926 if (bucket_is_alive(ctx, bucket, depth)) {
1927 return bucket;
1928 }
fac4786a 1929 xlate_report_bucket_not_live(ctx, bucket);
dd8cd4b4
SH
1930 }
1931
1932 return NULL;
1933}
1934
1e684d7d 1935static struct ofputil_bucket *
fe7e5749
SH
1936group_best_live_bucket(const struct xlate_ctx *ctx,
1937 const struct group_dpif *group,
1938 uint32_t basis)
1939{
1e684d7d 1940 struct ofputil_bucket *best_bucket = NULL;
fe7e5749 1941 uint32_t best_score = 0;
fe7e5749 1942
1e684d7d 1943 struct ofputil_bucket *bucket;
07a3cd5c 1944 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
fe7e5749 1945 if (bucket_is_alive(ctx, bucket, 0)) {
c09cb861
LS
1946 uint32_t score =
1947 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
fe7e5749
SH
1948 if (score >= best_score) {
1949 best_bucket = bucket;
1950 best_score = score;
1951 }
fac4786a
BP
1952 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": score %"PRIu32,
1953 bucket->bucket_id, score);
1954 } else {
1955 xlate_report_bucket_not_live(ctx, bucket);
fe7e5749 1956 }
fe7e5749
SH
1957 }
1958
1959 return best_bucket;
1960}
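/* Standalone sketch (standard C, illustrative names) of the selection
 * idiom above: each live bucket scores (16-bit hash of its id, seeded
 * with 'basis') * weight, and the highest score wins, so doubling a
 * bucket's weight roughly doubles its share of flows. example_hash() is
 * a stand-in for OVS's hash_int(). */
#include <stddef.h>
#include <stdint.h>

struct example_bucket { uint32_t id; uint16_t weight; int live; };

static uint32_t
example_hash(uint32_t x, uint32_t basis)
{
    x ^= basis;
    x *= UINT32_C(0x9e3779b1);     /* simple multiplicative mix */
    return x ^ (x >> 16);
}

static const struct example_bucket *
example_best_bucket(const struct example_bucket *b, size_t n,
                    uint32_t basis)
{
    const struct example_bucket *best = NULL;
    uint32_t best_score = 0;

    for (size_t i = 0; i < n; i++) {
        if (b[i].live) {
            uint32_t score = (example_hash(b[i].id, basis) & 0xffff)
                             * b[i].weight;
            if (score >= best_score) {
                best = &b[i];
                best_score = score;
            }
        }
    }
    return best;
}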
1961
9583bc14 1962static bool
46c88433 1963xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
9583bc14
EJ
1964{
1965 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1966 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1967}
1968
fed8962a
EG
1969static bool
1970xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1971{
1972 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1973}
1974
9583bc14 1975static bool
f0fb825a 1976xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
46c88433 1977{
f0fb825a
EG
1978 switch (xbundle->vlan_mode) {
1979 case PORT_VLAN_ACCESS:
1980 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
1981
1982 case PORT_VLAN_TRUNK:
1983 case PORT_VLAN_NATIVE_UNTAGGED:
1984 case PORT_VLAN_NATIVE_TAGGED:
1985 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
1986
fed8962a
EG
1987 case PORT_VLAN_DOT1Q_TUNNEL:
1988 return xvlan->v[0].vid == xbundle->vlan &&
1989 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
1990
f0fb825a
EG
1991 default:
1992 OVS_NOT_REACHED();
1993 }
46c88433
EJ
1994}
1995
1996static mirror_mask_t
1997xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1998{
1999 return xbundle != &ofpp_none_bundle
2000 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
2001 : 0;
2002}
2003
2004static mirror_mask_t
2005xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 2006{
46c88433
EJ
2007 return xbundle != &ofpp_none_bundle
2008 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
2009 : 0;
9583bc14
EJ
2010}
2011
46c88433
EJ
2012static mirror_mask_t
2013xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
9583bc14 2014{
46c88433
EJ
2015 return xbundle != &ofpp_none_bundle
2016 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
2017 : 0;
2018}
2019
2020static struct xbundle *
2d9b49dd
BP
2021lookup_input_bundle__(const struct xbridge *xbridge,
2022 ofp_port_t in_port, struct xport **in_xportp)
46c88433
EJ
2023{
2024 struct xport *xport;
9583bc14
EJ
2025
2026 /* Find the port and bundle for the received packet. */
46c88433
EJ
2027 xport = get_ofp_port(xbridge, in_port);
2028 if (in_xportp) {
2029 *in_xportp = xport;
9583bc14 2030 }
46c88433
EJ
2031 if (xport && xport->xbundle) {
2032 return xport->xbundle;
9583bc14
EJ
2033 }
2034
6362203b
YT
2035 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
2036 * which a controller may use as the ingress port for traffic that
2037 * it is sourcing. */
2038 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
9583bc14
EJ
2039 return &ofpp_none_bundle;
2040 }
2d9b49dd
BP
2041 return NULL;
2042}
9583bc14 2043
2d9b49dd
BP
2044static struct xbundle *
2045lookup_input_bundle(const struct xlate_ctx *ctx,
2046 ofp_port_t in_port, struct xport **in_xportp)
2047{
2048 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
2049 in_port, in_xportp);
2050 if (!xbundle) {
2051 /* Odd. A few possible reasons here:
2052 *
2053 * - We deleted a port but there are still a few packets queued up
2054 * from it.
2055 *
2056 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
2057 * we don't know about.
2058 *
2059 * - The ofproto client didn't configure the port as part of a bundle.
2060 * This is particularly likely to happen if a packet was received on
2061 * the port after it was created, but before the client had a chance
2062 * to configure its bundle.
2063 */
94783c7c 2064 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2d9b49dd 2065 in_port);
9583bc14 2066 }
2d9b49dd 2067 return xbundle;
9583bc14
EJ
2068}
2069
faa624b4
BP
2070/* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
2071 * given that the packet is ingressing or egressing on 'xbundle', which has ingress
2072 * or egress (as appropriate) mirrors 'mirrors'. */
9583bc14 2073static void
7efbc3b7
BP
2074mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
2075 mirror_mask_t mirrors)
9583bc14 2076{
f0fb825a
EG
2077 struct xvlan in_xvlan;
2078 struct xvlan xvlan;
2079
faa624b4
BP
2080 /* Figure out what VLAN the packet is in (because mirrors can select
2081 * packets on basis of VLAN). */
f0fb825a
EG
2082 xvlan_extract(&ctx->xin->flow, &in_xvlan);
2083 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
9583bc14
EJ
2084 return;
2085 }
f0fb825a 2086 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
9583bc14 2087
7efbc3b7 2088 const struct xbridge *xbridge = ctx->xbridge;
9583bc14 2089
7efbc3b7
BP
2090 /* Don't mirror to destinations that we've already mirrored to. */
2091 mirrors &= ~ctx->mirrors;
9583bc14
EJ
2092 if (!mirrors) {
2093 return;
2094 }
2095
a3954fd8
BP
2096 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate through
2097 * the candidates, adding the ones that really should be mirrored to
2098 * 'used_mirrors', as long as some candidates remain. */
2099 mirror_mask_t used_mirrors = 0;
9583bc14 2100 while (mirrors) {
7efbc3b7 2101 const unsigned long *vlans;
ec7ceaed
EJ
2102 mirror_mask_t dup_mirrors;
2103 struct ofbundle *out;
ec7ceaed 2104 int out_vlan;
1356dbd1 2105 int snaplen;
ec7ceaed 2106
faa624b4 2107 /* Get the details of the mirror represented by the rightmost 1-bit. */
500db308
BP
2108 ovs_assert(mirror_get(xbridge->mbridge, raw_ctz(mirrors),
2109 &vlans, &dup_mirrors,
2110 &out, &snaplen, &out_vlan));
ec7ceaed 2111
1356dbd1 2112
faa624b4
BP
2113 /* If this mirror selects on the basis of VLAN, and it does not select
2114 * the packet's VLAN, then discard this mirror and go on to the next one. */
ec7ceaed 2115 if (vlans) {
f0fb825a 2116 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
9583bc14 2117 }
f0fb825a 2118 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
9583bc14
EJ
2119 mirrors = zero_rightmost_1bit(mirrors);
2120 continue;
2121 }
2122
a3954fd8
BP
2123 /* We sent a packet to this mirror. */
2124 used_mirrors |= rightmost_1bit(mirrors);
2125
faa624b4
BP
2126 /* Record the mirror, and the mirrors that output to the same
2127 * destination, so that we don't mirror to them again. This must be
2128 * done now to ensure that output_normal(), below, doesn't recursively
2129 * output to the same mirrors. */
3d6151f3 2130 ctx->mirrors |= dup_mirrors;
1356dbd1 2131 ctx->mirror_snaplen = snaplen;
faa624b4
BP
2132
2133 /* Send the packet to the mirror. */
ec7ceaed 2134 if (out) {
0506f184 2135 struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
46c88433 2136 if (out_xbundle) {
f0fb825a 2137 output_normal(ctx, out_xbundle, &xvlan);
46c88433 2138 }
f0fb825a 2139 } else if (xvlan.v[0].vid != out_vlan
7efbc3b7 2140 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
71f21279 2141 struct xbundle *xb;
f0fb825a 2142 uint16_t old_vid = xvlan.v[0].vid;
9583bc14 2143
f0fb825a 2144 xvlan.v[0].vid = out_vlan;
71f21279
BP
2145 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
2146 if (xbundle_includes_vlan(xb, &xvlan)
2147 && !xbundle_mirror_out(xbridge, xb)) {
2148 output_normal(ctx, xb, &xvlan);
9583bc14
EJ
2149 }
2150 }
f0fb825a 2151 xvlan.v[0].vid = old_vid;
9583bc14 2152 }
faa624b4
BP
2153
2154 /* output_normal() could have recursively output (to different
2155 * mirrors), so make sure that we don't send duplicates. */
2156 mirrors &= ~ctx->mirrors;
1356dbd1 2157 ctx->mirror_snaplen = 0;
9583bc14 2158 }
a3954fd8
BP
2159
2160 if (used_mirrors) {
2161 if (ctx->xin->resubmit_stats) {
2162 mirror_update_stats(xbridge->mbridge, used_mirrors,
2163 ctx->xin->resubmit_stats->n_packets,
2164 ctx->xin->resubmit_stats->n_bytes);
2165 }
2166 if (ctx->xin->xcache) {
2167 struct xc_entry *entry;
2168
2169 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
2170 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
2171 entry->mirror.mirrors = used_mirrors;
2172 }
2173 }
9583bc14
EJ
2174}
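/* Standalone sketch (standard C plus the GCC/Clang __builtin_ctz) of the
 * bit-iteration idiom used above: visit each 1-bit of the mask once,
 * mirroring how rightmost_1bit()/zero_rightmost_1bit() walk 'mirrors'
 * while the 'dup_mirrors' bookkeeping prevents duplicate outputs. */
#include <stdint.h>
#include <stdio.h>

static void
example_walk_mask(uint32_t mirrors)
{
    uint32_t done = 0;

    while (mirrors) {
        uint32_t bit = mirrors & -mirrors;      /* rightmost 1-bit */
        if (!(done & bit)) {
            printf("mirror %d\n", __builtin_ctz(bit));
            done |= bit;                        /* never mirror twice */
        }
        mirrors &= mirrors - 1;                 /* zero that bit */
    }
}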
2175
7efbc3b7
BP
2176static void
2177mirror_ingress_packet(struct xlate_ctx *ctx)
2178{
2179 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
7efbc3b7 2180 struct xbundle *xbundle = lookup_input_bundle(
2d9b49dd 2181 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
7efbc3b7
BP
2182 if (xbundle) {
2183 mirror_packet(ctx, xbundle,
2184 xbundle_mirror_src(ctx->xbridge, xbundle));
2185 }
2186 }
2187}
2188
46c88433 2189/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2d9b49dd 2190 * If so, returns true. Otherwise, returns false.
9583bc14
EJ
2191 *
2192 * 'vid' should be the VID obtained from the 802.1Q header that was received as
2193 * part of a packet (specify 0 if there was no 802.1Q header), in the range
2194 * 0...4095. */
2195static bool
2d9b49dd
BP
2196input_vid_is_valid(const struct xlate_ctx *ctx,
2197 uint16_t vid, struct xbundle *in_xbundle)
9583bc14
EJ
2198{
2199 /* Allow any VID on the OFPP_NONE port. */
46c88433 2200 if (in_xbundle == &ofpp_none_bundle) {
9583bc14
EJ
2201 return true;
2202 }
2203
46c88433 2204 switch (in_xbundle->vlan_mode) {
9583bc14
EJ
2205 case PORT_VLAN_ACCESS:
2206 if (vid) {
2d9b49dd
BP
2207 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
2208 "packet received on port %s configured as VLAN "
fd13c6b5 2209 "%d access port", vid, in_xbundle->name,
2d9b49dd 2210 in_xbundle->vlan);
9583bc14
EJ
2211 return false;
2212 }
2213 return true;
2214
2215 case PORT_VLAN_NATIVE_UNTAGGED:
2216 case PORT_VLAN_NATIVE_TAGGED:
2217 if (!vid) {
2218 /* Port must always carry its native VLAN. */
2219 return true;
2220 }
2221 /* Fall through. */
2222 case PORT_VLAN_TRUNK:
f0fb825a 2223 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
2d9b49dd
BP
2224 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
2225 "received on port %s not configured for "
2226 "trunking VLAN %"PRIu16,
2227 vid, in_xbundle->name, vid);
9583bc14
EJ
2228 return false;
2229 }
2230 return true;
2231
fed8962a
EG
2232 case PORT_VLAN_DOT1Q_TUNNEL:
2233 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
2234 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
2235 "on dot1q-tunnel port %s that excludes this "
2236 "VLAN", vid, in_xbundle->name);
2237 return false;
2238 }
2239 return true;
2240
9583bc14 2241 default:
428b2edd 2242 OVS_NOT_REACHED();
9583bc14
EJ
2243 }
2244
2245}
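/* Examples (illustrative configurations): on an access port with
 * vlan=20, any tagged frame (vid != 0) is dropped and untagged frames
 * are accepted; on a trunk with trunks=10,20, vid=10 is accepted and
 * vid=30 is dropped; on a dot1q-tunnel port, 'vid' is checked against
 * the configured cvlans instead. */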
2246
f0fb825a
EG
2247static void
2248xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2249{
2250 *dst = *src;
2251}
2252
2253static void
2254xvlan_pop(struct xvlan *src)
2255{
2256 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2257 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2258 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2259}
2260
fed8962a
EG
2261static void
2262xvlan_push_uninit(struct xvlan *src)
2263{
2264 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2265 memset(&src->v[0], 0, sizeof(src->v[0]));
2266}
2267
f0fb825a
EG
2268/* Extract VLAN information (headers) from flow */
2269static void
2270xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2271{
2272 int i;
2273 memset(xvlan, 0, sizeof(*xvlan));
2274 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2275 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2276 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2277 break;
2278 }
2279 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2280 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2281 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2282 }
2283}
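/* Standalone sketch (standard C, illustrative constants mirroring the
 * OVS VLAN_* macros) of the TCI decomposition above: a 16-bit 802.1Q
 * TCI packs PCP (3 bits), CFI/DEI (1 bit), and VID (12 bits). As in
 * xvlan_extract(), the PCP bits are kept unshifted. */
#include <stdint.h>

#define EX_VLAN_VID_MASK 0x0fffu
#define EX_VLAN_CFI      0x1000u
#define EX_VLAN_PCP_MASK 0xe000u

static void
example_split_tci(uint16_t tci, uint16_t *vid, uint16_t *pcp_bits,
                  int *present)
{
    *vid = tci & EX_VLAN_VID_MASK;       /* like vlan_tci_to_vid() */
    *pcp_bits = tci & EX_VLAN_PCP_MASK;
    *present = (tci & EX_VLAN_CFI) != 0;
}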
2284
2285/* Put VLAN information (headers) to flow */
2286static void
ffbe41db
EB
2287xvlan_put(struct flow *flow, const struct xvlan *xvlan,
2288 enum port_priority_tags_mode use_priority_tags)
f0fb825a
EG
2289{
2290 ovs_be16 tci;
2291 int i;
2292 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2293 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
ffbe41db
EB
2294 if (tci || ((use_priority_tags == PORT_PRIORITY_TAGS_ALWAYS) &&
2295 xvlan->v[i].tpid)) {
f0fb825a
EG
2296 tci |= htons(VLAN_CFI);
2297 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2298 htons(xvlan->v[i].tpid) :
2299 htons(ETH_TYPE_VLAN_8021Q);
2300 }
2301 flow->vlans[i].tci = tci;
2302 }
2303}
2304
2305/* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2306 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2307 * returns the VLANs of the packet during bridge internal processing. */
2308static void
2309xvlan_input_translate(const struct xbundle *in_xbundle,
2310 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2311{
2312
2313 switch (in_xbundle->vlan_mode) {
2314 case PORT_VLAN_ACCESS:
2315 memset(xvlan, 0, sizeof(*xvlan));
2316 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2317 ETH_TYPE_VLAN_8021Q;
2318 xvlan->v[0].vid = in_xbundle->vlan;
2319 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2320 break;
2321
2322 case PORT_VLAN_TRUNK:
2323 xvlan_copy(xvlan, in_xvlan);
2324 break;
2325
2326 case PORT_VLAN_NATIVE_UNTAGGED:
2327 case PORT_VLAN_NATIVE_TAGGED:
2328 xvlan_copy(xvlan, in_xvlan);
2329 if (!in_xvlan->v[0].vid) {
2330 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2331 ETH_TYPE_VLAN_8021Q;
2332 xvlan->v[0].vid = in_xbundle->vlan;
2333 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2334 }
2335 break;
2336
fed8962a
EG
2337 case PORT_VLAN_DOT1Q_TUNNEL:
2338 xvlan_copy(xvlan, in_xvlan);
2339 xvlan_push_uninit(xvlan);
2340 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2341 xvlan->v[0].vid = in_xbundle->vlan;
2342 xvlan->v[0].pcp = 0;
2343 break;
2344
f0fb825a
EG
2345 default:
2346 OVS_NOT_REACHED();
2347 }
2348}
2349
2350/* Given 'xvlan', the VLANs of a packet during internal processing, and
2351 * 'out_xbundle', a bundle on which the packet is to be output, returns the
2352 * VLANs that should be included in the output packet. */
2353static void
2354xvlan_output_translate(const struct xbundle *out_xbundle,
2355 const struct xvlan *xvlan, struct xvlan *out_xvlan)
9583bc14 2356{
46c88433 2357 switch (out_xbundle->vlan_mode) {
9583bc14 2358 case PORT_VLAN_ACCESS:
f0fb825a
EG
2359 memset(out_xvlan, 0, sizeof(*out_xvlan));
2360 break;
9583bc14
EJ
2361
2362 case PORT_VLAN_TRUNK:
2363 case PORT_VLAN_NATIVE_TAGGED:
f0fb825a
EG
2364 xvlan_copy(out_xvlan, xvlan);
2365 break;
9583bc14
EJ
2366
2367 case PORT_VLAN_NATIVE_UNTAGGED:
f0fb825a
EG
2368 xvlan_copy(out_xvlan, xvlan);
2369 if (xvlan->v[0].vid == out_xbundle->vlan) {
2370 xvlan_pop(out_xvlan);
2371 }
2372 break;
9583bc14 2373
fed8962a
EG
2374 case PORT_VLAN_DOT1Q_TUNNEL:
2375 xvlan_copy(out_xvlan, xvlan);
2376 xvlan_pop(out_xvlan);
2377 break;
2378
9583bc14 2379 default:
428b2edd 2380 OVS_NOT_REACHED();
9583bc14
EJ
2381 }
2382}
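/* Worked example (values assumed for illustration): a frame tagged with
 * customer VLAN 10 arrives on a dot1q-tunnel bundle configured with
 * qinq_ethtype 0x88a8 and service VLAN 100. xvlan_input_translate()
 * pushes the service tag, so during bridge processing:
 *
 *     xvlan.v[0] = { tpid 0x88a8, vid 100, pcp 0 }   (service, outer)
 *     xvlan.v[1] = { tpid 0x8100, vid 10 }           (customer, inner)
 *
 * On output through another dot1q-tunnel bundle, xvlan_output_translate()
 * pops v[0] again, leaving only the customer tag on the wire. */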
2383
fed8962a
EG
2384/* If the output xbundle is a dot1q-tunnel, set the cvlan mask bits. */
2385static void
2386check_and_set_cvlan_mask(struct flow_wildcards *wc,
2387 const struct xbundle *xbundle)
2388{
2389 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2390 wc->masks.vlans[1].tci = htons(0xffff);
2391 }
2392}
2393
9583bc14 2394static void
46c88433 2395output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
f0fb825a 2396 const struct xvlan *xvlan)
9583bc14 2397{
9583bc14 2398 uint16_t vid;
f0fb825a 2399 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
46c88433 2400 struct xport *xport;
e93ef1c7
JR
2401 struct xlate_bond_recirc xr;
2402 bool use_recirc = false;
f0fb825a 2403 struct xvlan out_xvlan;
9583bc14 2404
fed8962a
EG
2405 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2406
f0fb825a
EG
2407 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2408 if (out_xbundle->use_priority_tags) {
2409 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2410 VLAN_PCP_MASK;
2411 }
2412 vid = out_xvlan.v[0].vid;
417e7e66 2413 if (ovs_list_is_empty(&out_xbundle->xports)) {
46c88433
EJ
2414 /* Partially configured bundle with no slaves. Drop the packet. */
2415 return;
2416 } else if (!out_xbundle->bond) {
417e7e66 2417 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
46c88433 2418 bundle_node);
9583bc14 2419 } else {
49a73e0c 2420 struct flow_wildcards *wc = ctx->wc;
84f0f298 2421 struct ofport_dpif *ofport;
adcf00ba 2422
a80aba3a
AZ
2423 if (ctx->xbridge->support.odp.recirc) {
2424 /* In case recirculation is not actually in use, 'xr.recirc_id'
2425 * will be set to '0', since a valid 'recirc_id' can
82f9f1f5
AZ
2426 * not be zero. */
2427 bond_update_post_recirc_rules(out_xbundle->bond,
2428 &xr.recirc_id,
2429 &xr.hash_basis);
2430 if (xr.recirc_id) {
2431 /* Use recirculation instead of output. */
2432 use_recirc = true;
e93ef1c7 2433 xr.hash_alg = OVS_HASH_ALG_L4;
54ecb5a2
AZ
2434 /* Recirculation does not require unmasking hash fields. */
2435 wc = NULL;
adcf00ba
AZ
2436 }
2437 }
46c88433 2438
54ecb5a2
AZ
2439 ofport = bond_choose_output_slave(out_xbundle->bond,
2440 &ctx->xin->flow, wc, vid);
0506f184 2441 xport = xport_lookup(ctx->xcfg, ofport);
46c88433
EJ
2442
2443 if (!xport) {
9583bc14
EJ
2444 /* No slaves enabled, so drop packet. */
2445 return;
2446 }
d6fc5f57 2447
e93ef1c7 2448 /* If use_recirc is set, the main thread will handle stats
b256dc52 2449 * accounting for this bond. */
e93ef1c7 2450 if (!use_recirc) {
b256dc52
JS
2451 if (ctx->xin->resubmit_stats) {
2452 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2453 ctx->xin->resubmit_stats->n_bytes);
2454 }
2455 if (ctx->xin->xcache) {
2456 struct xc_entry *entry;
2457 struct flow *flow;
2458
2459 flow = &ctx->xin->flow;
2460 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
901a517e
JR
2461 entry->bond.bond = bond_ref(out_xbundle->bond);
2462 entry->bond.flow = xmemdup(flow, sizeof *flow);
2463 entry->bond.vid = vid;
b256dc52 2464 }
d6fc5f57 2465 }
9583bc14
EJ
2466 }
2467
f0fb825a 2468 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
ffbe41db 2469 xvlan_put(&ctx->xin->flow, &out_xvlan, out_xbundle->use_priority_tags);
9583bc14 2470
feee58b9 2471 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
11938578 2472 false, false);
f0fb825a 2473 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
9583bc14
EJ
2474}
2475
2476/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2477 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2478 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2479static bool
2480is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2481{
2482 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2483 return false;
2484 }
2485
2486 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2487 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2488 return false;
2489 }
2490
2491 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2492 if (flow->nw_proto == ARP_OP_REPLY) {
2493 return true;
2494 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2495 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2496 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2497
2498 return flow->nw_src == flow->nw_dst;
2499 } else {
2500 return false;
2501 }
2502}
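/* Example (illustrative address): a VM announcing 192.168.1.5 after
 * migration sends an ARP request whose sender and target protocol
 * addresses are both 192.168.1.5 to the broadcast MAC; the nw_src ==
 * nw_dst test above catches that form, and ARP_OP_REPLY catches the
 * older gratuitous-reply form. */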
2503
ff69c24a
FL
2504/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2505 * dropped. Returns true if they may be forwarded, false if they should be
2506 * dropped.
2507 *
2508 * 'in_port' must be the xport that corresponds to flow->in_port.
2509 * 'in_port' must be part of a bundle (i.e. in_port->bundle must be nonnull).
2510 *
2511 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2512 * returned by xvlan_input_translate(). It must be a valid VLAN for 'in_port',
2513 * as checked by input_vid_is_valid().
2517 */
2518static bool
2519is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2520 uint16_t vlan)
2521{
2522 struct xbundle *in_xbundle = in_port->xbundle;
2523 const struct xbridge *xbridge = ctx->xbridge;
2524 struct flow *flow = &ctx->xin->flow;
2525
2526 /* Drop frames for reserved multicast addresses
2527 * only if forward_bpdu option is absent. */
2528 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2d9b49dd
BP
2529 xlate_report(ctx, OFT_DETAIL,
2530 "packet has reserved destination MAC, dropping");
ff69c24a
FL
2531 return false;
2532 }
2533
2534 if (in_xbundle->bond) {
2535 struct mac_entry *mac;
2536
2537 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2538 flow->dl_dst)) {
2539 case BV_ACCEPT:
2540 break;
2541
2542 case BV_DROP:
2d9b49dd
BP
2543 xlate_report(ctx, OFT_DETAIL,
2544 "bonding refused admissibility, dropping");
ff69c24a
FL
2545 return false;
2546
2547 case BV_DROP_IF_MOVED:
2548 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2549 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
9d078ec2
BP
2550 if (mac
2551 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
49a73e0c 2552 && (!is_gratuitous_arp(flow, ctx->wc)
9d078ec2 2553 || mac_entry_is_grat_arp_locked(mac))) {
ff69c24a 2554 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2d9b49dd
BP
2555 xlate_report(ctx, OFT_DETAIL,
2556 "SLB bond thinks this packet looped back, "
ff69c24a
FL
2557 "dropping");
2558 return false;
2559 }
2560 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2561 break;
2562 }
2563 }
2564
2565 return true;
2566}
2567
2d9b49dd
BP
2568static bool
2569update_learning_table__(const struct xbridge *xbridge,
2570 struct xbundle *in_xbundle, struct eth_addr dl_src,
2571 int vlan, bool is_grat_arp)
2572{
2573 return (in_xbundle == &ofpp_none_bundle
2574 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2575 is_grat_arp,
2576 in_xbundle->bond != NULL,
2577 in_xbundle->ofbundle));
2578}
2579
ee047520 2580static void
2d9b49dd 2581update_learning_table(const struct xlate_ctx *ctx,
064799a1
JR
2582 struct xbundle *in_xbundle, struct eth_addr dl_src,
2583 int vlan, bool is_grat_arp)
ee047520 2584{
2d9b49dd
BP
2585 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2586 is_grat_arp)) {
2587 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2588 "on port %s in VLAN %d",
2589 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
ee047520 2590 }
9583bc14
EJ
2591}
2592
86e2dcdd
FL
2593/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2594 * was received on 'in_xbundle' in 'vlan' and is either a Report or a Query. */
2595static void
2d9b49dd 2596update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
06994f87
TLSC
2597 const struct flow *flow,
2598 struct mcast_snooping *ms, int vlan,
2599 struct xbundle *in_xbundle,
2600 const struct dp_packet *packet)
86e2dcdd
FL
2601 OVS_REQ_WRLOCK(ms->rwlock)
2602{
46445c63 2603 const struct igmp_header *igmp;
e3102e42 2604 int count;
46445c63 2605 size_t offset;
06994f87 2606 ovs_be32 ip4 = flow->igmp_group_ip4;
86e2dcdd 2607
46445c63
EC
2608 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2609 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2610 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2611 xlate_report_debug(ctx, OFT_DETAIL,
2612 "multicast snooping received bad IGMP "
2613 "checksum on port %s in VLAN %d",
2614 in_xbundle->name, vlan);
46445c63
EC
2615 return;
2616 }
2617
86e2dcdd
FL
2618 switch (ntohs(flow->tp_src)) {
2619 case IGMP_HOST_MEMBERSHIP_REPORT:
2620 case IGMPV2_HOST_MEMBERSHIP_REPORT:
964a4d5f 2621 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2622 xlate_report_debug(ctx, OFT_DETAIL,
2623 "multicast snooping learned that "
2624 IP_FMT" is on port %s in VLAN %d",
2625 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2626 }
2627 break;
2628 case IGMP_HOST_LEAVE_MESSAGE:
964a4d5f 2629 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2630 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping learned that "
2631 IP_FMT" left port %s in VLAN %d",
2632 IP_ARGS(ip4), in_xbundle->name, vlan);
86e2dcdd
FL
2633 }
2634 break;
2635 case IGMP_HOST_MEMBERSHIP_QUERY:
2636 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2d9b49dd
BP
2637 in_xbundle->ofbundle)) {
2638 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2639 "from "IP_FMT" is on port %s in VLAN %d",
2640 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
86e2dcdd
FL
2641 }
2642 break;
e3102e42 2643 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2d9b49dd
BP
2644 count = mcast_snooping_add_report(ms, packet, vlan,
2645 in_xbundle->ofbundle);
2646 if (count) {
2647 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2648 "%d addresses on port %s in VLAN %d",
2649 count, in_xbundle->name, vlan);
e3102e42
TLSC
2650 }
2651 break;
86e2dcdd
FL
2652 }
2653}
2654
06994f87 2655static void
2d9b49dd 2656update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
06994f87
TLSC
2657 const struct flow *flow,
2658 struct mcast_snooping *ms, int vlan,
2659 struct xbundle *in_xbundle,
2660 const struct dp_packet *packet)
2661 OVS_REQ_WRLOCK(ms->rwlock)
2662{
46445c63 2663 const struct mld_header *mld;
06994f87 2664 int count;
46445c63
EC
2665 size_t offset;
2666
2667 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2668 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2669
2670 if (!mld ||
2671 packet_csum_upperlayer6(dp_packet_l3(packet),
2672 mld, IPPROTO_ICMPV6,
2673 dp_packet_l4_size(packet)) != 0) {
2d9b49dd
BP
2674 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2675 "bad MLD checksum on port %s in VLAN %d",
2676 in_xbundle->name, vlan);
46445c63
EC
2677 return;
2678 }
06994f87
TLSC
2679
2680 switch (ntohs(flow->tp_src)) {
2681 case MLD_QUERY:
2682 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2683 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2d9b49dd
BP
2684 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2685 "port %s in VLAN %d", in_xbundle->name, vlan);
06994f87
TLSC
2686 }
2687 break;
2688 case MLD_REPORT:
2689 case MLD_DONE:
2690 case MLD2_REPORT:
2691 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2692 if (count) {
2d9b49dd
BP
2693 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2694 "%d addresses on port %s in VLAN %d",
2695 count, in_xbundle->name, vlan);
06994f87
TLSC
2696 }
2697 break;
2698 }
2699}
2700
86e2dcdd
FL
2701/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2702 * was received on 'in_xbundle' in 'vlan'. */
2703static void
2d9b49dd 2704update_mcast_snooping_table(const struct xlate_ctx *ctx,
86e2dcdd 2705 const struct flow *flow, int vlan,
e3102e42
TLSC
2706 struct xbundle *in_xbundle,
2707 const struct dp_packet *packet)
86e2dcdd 2708{
2d9b49dd 2709 struct mcast_snooping *ms = ctx->xbridge->ms;
86e2dcdd 2710 struct xbundle *mcast_xbundle;
f4ae6e23 2711 struct mcast_port_bundle *fport;
86e2dcdd
FL
2712
2713 /* Don't learn the OFPP_NONE port. */
2714 if (in_xbundle == &ofpp_none_bundle) {
2715 return;
2716 }
2717
2718 /* Don't learn from flood ports */
2719 mcast_xbundle = NULL;
2720 ovs_rwlock_wrlock(&ms->rwlock);
f4ae6e23 2721 LIST_FOR_EACH(fport, node, &ms->fport_list) {
0506f184 2722 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
86e2dcdd
FL
2723 if (mcast_xbundle == in_xbundle) {
2724 break;
2725 }
2726 }
2727
2728 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
06994f87 2729 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2d9b49dd 2730 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
06994f87
TLSC
2731 in_xbundle, packet);
2732 } else {
2d9b49dd 2733 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
06994f87
TLSC
2734 in_xbundle, packet);
2735 }
86e2dcdd
FL
2736 }
2737 ovs_rwlock_unlock(&ms->rwlock);
2738}
efc93e66
BP
2739\f
2740/* A list of multicast output ports.
2741 *
2742 * We accumulate output ports and then do all the outputs afterward. It would
2743 * be more natural to do the outputs one at a time as we discover the need for
2744 * each one, but this can cause a deadlock because we need to take the
2745 * mcast_snooping's rwlock for reading to iterate through the port lists and
2746 * doing an output, if it goes to a patch port, can eventually come back to the
2747 * same mcast_snooping and attempt to take the write lock (see
2748 * https://github.com/openvswitch/ovs-issues/issues/153). */
2749struct mcast_output {
2750 /* Discrete ports. */
2751 struct xbundle **xbundles;
2752 size_t n, allocated;
2753
2754 /* If set, flood to all ports. */
2755 bool flood;
2756};
2757#define MCAST_OUTPUT_INIT { NULL, 0, 0, false }
2758
2759/* Adds 'mcast_xbundle' to 'out'. */
2760static void
2761mcast_output_add(struct mcast_output *out, struct xbundle *mcast_xbundle)
2762{
2763 if (out->n >= out->allocated) {
2764 out->xbundles = x2nrealloc(out->xbundles, &out->allocated,
2765 sizeof *out->xbundles);
2766 }
2767 out->xbundles[out->n++] = mcast_xbundle;
2768}
2769
2770/* Outputs the packet in 'ctx' to all of the output ports in 'out', given input
2771 * bundle 'in_xbundle' and the current 'xvlan'. */
2772static void
2773mcast_output_finish(struct xlate_ctx *ctx, struct mcast_output *out,
2774 struct xbundle *in_xbundle, struct xvlan *xvlan)
2775{
2776 if (out->flood) {
2777 xlate_normal_flood(ctx, in_xbundle, xvlan);
2778 } else {
2779 for (size_t i = 0; i < out->n; i++) {
2780 output_normal(ctx, out->xbundles[i], xvlan);
2781 }
2782 }
2783
2784 free(out->xbundles);
2785}
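/* Illustrative sketch (hypothetical wrapper, not part of the original
 * file) of the pattern this structure exists for: accumulate
 * destinations while holding the snooping table's read lock, drop the
 * lock, and only then emit the outputs, so an output that reaches a
 * patch port can never re-enter the rwlock while it is held.
 * xlate_normal_mcast_send_mrouters() is defined later in this file. */
static void
example_mcast_forward(struct xlate_ctx *ctx, struct mcast_snooping *ms,
                      struct xbundle *in_xbundle, struct xvlan *xvlan)
{
    struct mcast_output out = MCAST_OUTPUT_INIT;

    ovs_rwlock_rdlock(&ms->rwlock);
    xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, xvlan, &out);
    ovs_rwlock_unlock(&ms->rwlock);

    mcast_output_finish(ctx, &out, in_xbundle, xvlan);  /* lock not held */
}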
86e2dcdd
FL
2786
2787/* Sends the packet to ports that have learned the multicast group. */
2788static void
2789xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2790 struct mcast_snooping *ms OVS_UNUSED,
2791 struct mcast_group *grp,
f0fb825a 2792 struct xbundle *in_xbundle,
efc93e66 2793 struct mcast_output *out)
86e2dcdd
FL
2794 OVS_REQ_RDLOCK(ms->rwlock)
2795{
86e2dcdd
FL
2796 struct mcast_group_bundle *b;
2797 struct xbundle *mcast_xbundle;
2798
86e2dcdd 2799 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
0506f184 2800 mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
86e2dcdd 2801 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2802 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
efc93e66 2803 mcast_output_add(out, mcast_xbundle);
86e2dcdd 2804 } else if (!mcast_xbundle) {
2d9b49dd
BP
2805 xlate_report(ctx, OFT_WARN,
2806 "mcast group port is unknown, dropping");
86e2dcdd 2807 } else {
2d9b49dd
BP
2808 xlate_report(ctx, OFT_DETAIL,
2809 "mcast group port is input port, dropping");
86e2dcdd
FL
2810 }
2811 }
2812}
2813
2814/* Sends the packet to ports connected to multicast routers. */
2815static void
2816xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2817 struct mcast_snooping *ms,
f0fb825a 2818 struct xbundle *in_xbundle,
efc93e66
BP
2819 const struct xvlan *xvlan,
2820 struct mcast_output *out)
86e2dcdd
FL
2821 OVS_REQ_RDLOCK(ms->rwlock)
2822{
86e2dcdd
FL
2823 struct mcast_mrouter_bundle *mrouter;
2824 struct xbundle *mcast_xbundle;
2825
86e2dcdd 2826 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
0506f184 2827 mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
94a881c1 2828 if (mcast_xbundle && mcast_xbundle != in_xbundle
f0fb825a 2829 && mrouter->vlan == xvlan->v[0].vid) {
2d9b49dd 2830 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
efc93e66 2831 mcast_output_add(out, mcast_xbundle);
86e2dcdd 2832 } else if (!mcast_xbundle) {
2d9b49dd
BP
2833 xlate_report(ctx, OFT_WARN,
2834 "mcast router port is unknown, dropping");
f0fb825a 2835 } else if (mrouter->vlan != xvlan->v[0].vid) {
2d9b49dd
BP
2836 xlate_report(ctx, OFT_DETAIL,
2837 "mcast router is on another vlan, dropping");
86e2dcdd 2838 } else {
2d9b49dd
BP
2839 xlate_report(ctx, OFT_DETAIL,
2840 "mcast router port is input port, dropping");
86e2dcdd
FL
2841 }
2842 }
2843}
2844
2845/* Sends the packet to ports flagged for flooding. */
2846static void
2847xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2848 struct mcast_snooping *ms,
f0fb825a 2849 struct xbundle *in_xbundle,
efc93e66 2850 struct mcast_output *out)
86e2dcdd
FL
2851 OVS_REQ_RDLOCK(ms->rwlock)
2852{
f4ae6e23 2853 struct mcast_port_bundle *fport;
86e2dcdd
FL
2854 struct xbundle *mcast_xbundle;
2855
f4ae6e23 2856 LIST_FOR_EACH(fport, node, &ms->fport_list) {
0506f184 2857 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
86e2dcdd 2858 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2d9b49dd 2859 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
efc93e66 2860 mcast_output_add(out, mcast_xbundle);
86e2dcdd 2861 } else if (!mcast_xbundle) {
2d9b49dd
BP
2862 xlate_report(ctx, OFT_WARN,
2863 "mcast flood port is unknown, dropping");
86e2dcdd 2864 } else {
2d9b49dd
BP
2865 xlate_report(ctx, OFT_DETAIL,
2866 "mcast flood port is input port, dropping");
86e2dcdd
FL
2867 }
2868 }
2869}
2870
8e04a33f
FL
2871/* Forwards the Reports to configured ports. */
2872static void
2873xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2874 struct mcast_snooping *ms,
f0fb825a 2875 struct xbundle *in_xbundle,
efc93e66 2876 struct mcast_output *out)
8e04a33f
FL
2877 OVS_REQ_RDLOCK(ms->rwlock)
2878{
8e04a33f
FL
2879 struct mcast_port_bundle *rport;
2880 struct xbundle *mcast_xbundle;
2881
8e04a33f 2882 LIST_FOR_EACH(rport, node, &ms->rport_list) {
0506f184 2883 mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
c781bd52
LJ
2884 if (mcast_xbundle
2885 && mcast_xbundle != in_xbundle
2886 && mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
2d9b49dd
BP
2887 xlate_report(ctx, OFT_DETAIL,
2888 "forwarding report to mcast flagged port");
efc93e66 2889 mcast_output_add(out, mcast_xbundle);
8e04a33f 2890 } else if (!mcast_xbundle) {
2d9b49dd
BP
2891 xlate_report(ctx, OFT_WARN,
2892 "mcast port is unknown, dropping the report");
8e04a33f 2893 } else {
2d9b49dd
BP
2894 xlate_report(ctx, OFT_DETAIL,
2895 "mcast port is input port, dropping the Report");
8e04a33f
FL
2896 }
2897 }
2898}
2899
682800a4
FL
2900static void
2901xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
f0fb825a 2902 struct xvlan *xvlan)
682800a4
FL
2903{
2904 struct xbundle *xbundle;
2905
2906 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2907 if (xbundle != in_xbundle
c781bd52 2908 && xbundle->ofbundle != in_xbundle->ofbundle
f0fb825a 2909 && xbundle_includes_vlan(xbundle, xvlan)
682800a4
FL
2910 && xbundle->floodable
2911 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
f0fb825a 2912 output_normal(ctx, xbundle, xvlan);
682800a4
FL
2913 }
2914 }
2031ef97 2915 ctx->nf_output_iface = NF_OUT_FLOOD;
682800a4
FL
2916}
2917
a75636c8
BP
2918static bool
2919is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2920{
2921 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2922 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2923 return ip_is_local_multicast(flow->nw_dst);
2924 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2925 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2926 return ipv6_is_all_hosts(&flow->ipv6_dst);
2927 } else {
2928 return false;
2929 }
2930}
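/* Example: 224.0.0.251 (mDNS) falls in 224.0.0.0/24, so it is treated
 * as link-local multicast and flooded (RFC 4541, section 2.1.2),
 * whereas 239.1.1.1 is not and goes through the snooping tables. */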
2931
9583bc14
EJ
2932static void
2933xlate_normal(struct xlate_ctx *ctx)
2934{
49a73e0c 2935 struct flow_wildcards *wc = ctx->wc;
33bf9176 2936 struct flow *flow = &ctx->xin->flow;
46c88433
EJ
2937 struct xbundle *in_xbundle;
2938 struct xport *in_port;
9583bc14 2939 struct mac_entry *mac;
d6d5bbc9 2940 void *mac_port;
f0fb825a
EG
2941 struct xvlan in_xvlan;
2942 struct xvlan xvlan;
9583bc14 2943 uint16_t vlan;
9583bc14 2944
33bf9176
BP
2945 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2946 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
f0fb825a 2947 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
9583bc14 2948
2d9b49dd 2949 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
46c88433 2950 if (!in_xbundle) {
2d9b49dd 2951 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
9583bc14
EJ
2952 return;
2953 }
2954
2955 /* Drop malformed frames. */
f0fb825a
EG
2956 if (eth_type_vlan(flow->dl_type) &&
2957 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
9583bc14 2958 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2959 xlate_report_error(ctx, "dropping packet with partial "
2960 "VLAN tag received on port %s",
2961 in_xbundle->name);
9583bc14 2962 }
2d9b49dd 2963 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
9583bc14
EJ
2964 return;
2965 }
2966
2967 /* Drop frames on bundles reserved for mirroring. */
46c88433 2968 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
9583bc14 2969 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
2970 xlate_report_error(ctx, "dropping packet received on port %s, "
2971 "which is reserved exclusively for mirroring",
2972 in_xbundle->name);
9583bc14 2973 }
2d9b49dd
BP
2974 xlate_report(ctx, OFT_WARN,
2975 "input port is mirror output port, dropping");
9583bc14
EJ
2976 return;
2977 }
2978
2979 /* Check VLAN. */
f0fb825a
EG
2980 xvlan_extract(flow, &in_xvlan);
2981 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
2d9b49dd
BP
2982 xlate_report(ctx, OFT_WARN,
2983 "disallowed VLAN VID for this input port, dropping");
9583bc14
EJ
2984 return;
2985 }
f0fb825a
EG
2986 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
2987 vlan = xvlan.v[0].vid;
9583bc14
EJ
2988
2989 /* Check other admissibility requirements. */
2990 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2991 return;
2992 }
2993
2994 /* Learn source MAC. */
064799a1 2995 bool is_grat_arp = is_gratuitous_arp(flow, wc);
875ab130
BP
2996 if (ctx->xin->allow_side_effects
2997 && flow->packet_type == htonl(PT_ETH)
2998 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
2999 ) {
2d9b49dd 3000 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
064799a1 3001 is_grat_arp);
9583bc14 3002 }
064799a1 3003 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
b256dc52
JS
3004 struct xc_entry *entry;
3005
064799a1 3006 /* Save just enough info to update mac learning table later. */
b256dc52 3007 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
901a517e
JR
3008 entry->normal.ofproto = ctx->xbridge->ofproto;
3009 entry->normal.in_port = flow->in_port.ofp_port;
3010 entry->normal.dl_src = flow->dl_src;
3011 entry->normal.vlan = vlan;
3012 entry->normal.is_gratuitous_arp = is_grat_arp;
b256dc52 3013 }
9583bc14
EJ
3014
3015 /* Determine output bundle. */
86e2dcdd
FL
3016 if (mcast_snooping_enabled(ctx->xbridge->ms)
3017 && !eth_addr_is_broadcast(flow->dl_dst)
3018 && eth_addr_is_multicast(flow->dl_dst)
06994f87 3019 && is_ip_any(flow)) {
86e2dcdd 3020 struct mcast_snooping *ms = ctx->xbridge->ms;
06994f87 3021 struct mcast_group *grp = NULL;
86e2dcdd 3022
a75636c8 3023 if (is_igmp(flow, wc)) {
1bc24169
BP
3024 /*
3025 * IGMP packets need to take the slow path so that they are
3026 * processed for mdb updates. Otherwise, group entries could
3027 * expire even though hosts are still sending reports.
3028 */
3029 ctx->xout->slow |= SLOW_ACTION;
3030
a75636c8 3031 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
d29f137b
TLSC
3032 if (mcast_snooping_is_membership(flow->tp_src) ||
3033 mcast_snooping_is_query(flow->tp_src)) {
df70a773 3034 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 3035 update_mcast_snooping_table(ctx, flow, vlan,
e3102e42 3036 in_xbundle, ctx->xin->packet);
d29f137b 3037 }
86e2dcdd 3038 }
d6d5bbc9 3039
86e2dcdd 3040 if (mcast_snooping_is_membership(flow->tp_src)) {
efc93e66
BP
3041 struct mcast_output out = MCAST_OUTPUT_INIT;
3042
86e2dcdd 3043 ovs_rwlock_rdlock(&ms->rwlock);
efc93e66
BP
3044 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3045 &out);
8e04a33f
FL
3046 /* RFC4541: section 2.1.1, item 1: A snooping switch should
3047 * forward IGMP Membership Reports only to those ports where
3048 * multicast routers are attached. Alternatively stated: a
3049 * snooping switch should not forward IGMP Membership Reports
3050 * to ports on which only hosts are attached.
3051 * An administrative control may be provided to override this
3052 * restriction, allowing the report messages to be flooded to
3053 * other ports. */
efc93e66 3054 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
86e2dcdd 3055 ovs_rwlock_unlock(&ms->rwlock);
efc93e66
BP
3056
3057 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
86e2dcdd 3058 } else {
2d9b49dd 3059 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
f0fb825a 3060 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd
FL
3061 }
3062 return;
a75636c8 3063 } else if (is_mld(flow, wc)) {
06994f87 3064 ctx->xout->slow |= SLOW_ACTION;
df70a773 3065 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2d9b49dd 3066 update_mcast_snooping_table(ctx, flow, vlan,
06994f87
TLSC
3067 in_xbundle, ctx->xin->packet);
3068 }
a75636c8 3069 if (is_mld_report(flow, wc)) {
efc93e66
BP
3070 struct mcast_output out = MCAST_OUTPUT_INIT;
3071
06994f87 3072 ovs_rwlock_rdlock(&ms->rwlock);
efc93e66
BP
3073 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3074 &out);
3075 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
06994f87 3076 ovs_rwlock_unlock(&ms->rwlock);
efc93e66
BP
3077
3078 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
06994f87 3079 } else {
2d9b49dd 3080 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
f0fb825a 3081 xlate_normal_flood(ctx, in_xbundle, &xvlan);
06994f87 3082 }
86e2dcdd 3083 } else {
a75636c8 3084 if (is_ip_local_multicast(flow, wc)) {
86e2dcdd
FL
3085 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
3086 * address in the 224.0.0.x range which are not IGMP must
3087 * be forwarded on all ports */
2d9b49dd
BP
3088 xlate_report(ctx, OFT_DETAIL,
3089 "RFC4541: section 2.1.2, item 2, flooding");
f0fb825a 3090 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd
FL
3091 return;
3092 }
3093 }
3094
3095 /* forwarding to group base ports */
efc93e66
BP
3096 struct mcast_output out = MCAST_OUTPUT_INIT;
3097
86e2dcdd 3098 ovs_rwlock_rdlock(&ms->rwlock);
06994f87
TLSC
3099 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3100 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
3101 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3102 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
3103 }
86e2dcdd 3104 if (grp) {
efc93e66
BP
3105 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &out);
3106 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
3107 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3108 &out);
9583bc14 3109 } else {
86e2dcdd 3110 if (mcast_snooping_flood_unreg(ms)) {
2d9b49dd
BP
3111 xlate_report(ctx, OFT_DETAIL,
3112 "unregistered multicast, flooding");
efc93e66 3113 out.flood = true;
86e2dcdd 3114 } else {
efc93e66
BP
3115 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3116 &out);
3117 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
86e2dcdd 3118 }
9583bc14 3119 }
86e2dcdd 3120 ovs_rwlock_unlock(&ms->rwlock);
efc93e66
BP
3121
3122 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
9583bc14 3123 } else {
86e2dcdd
FL
3124 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
3125 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
9d078ec2 3126 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
86e2dcdd
FL
3127 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
3128
3129 if (mac_port) {
0506f184 3130 struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
84dd881b
DL
3131
3132 if (mac_xbundle && xbundle_mirror_out(ctx->xbridge, mac_xbundle)) {
3133 xlate_report(ctx, OFT_WARN,
3134 "learned port is a mirror port, dropping");
3135 return;
3136 }
3137
c781bd52
LJ
3138 if (mac_xbundle
3139 && mac_xbundle != in_xbundle
3140 && mac_xbundle->ofbundle != in_xbundle->ofbundle) {
2d9b49dd 3141 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
f0fb825a 3142 output_normal(ctx, mac_xbundle, &xvlan);
86e2dcdd 3143 } else if (!mac_xbundle) {
2d9b49dd
BP
3144 xlate_report(ctx, OFT_WARN,
3145 "learned port is unknown, dropping");
86e2dcdd 3146 } else {
2d9b49dd
BP
3147 xlate_report(ctx, OFT_DETAIL,
3148 "learned port is input port, dropping");
86e2dcdd
FL
3149 }
3150 } else {
2d9b49dd
BP
3151 xlate_report(ctx, OFT_DETAIL,
3152 "no learned MAC for destination, flooding");
f0fb825a 3153 xlate_normal_flood(ctx, in_xbundle, &xvlan);
86e2dcdd 3154 }
9583bc14
EJ
3155 }
3156}
3157
/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'.  The
 * 'probability' is the number of packets out of UINT32_MAX to sample.  The
 * 'cookie' is passed back in the callback for each sampled packet.
 * 'tunnel_out_port', if not ODPP_NONE, is added as the
 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute.  If 'include_actions' is
 * true, an OVS_USERSPACE_ATTR_ACTIONS attribute is added. */
static size_t
compose_sample_action(struct xlate_ctx *ctx,
                      const uint32_t probability,
                      const struct user_action_cookie *cookie,
                      const odp_port_t tunnel_out_port,
                      bool include_actions)
{
    if (probability == 0) {
        /* No need to generate sampling or the inner action. */
        return 0;
    }

    /* If the slow path meter is configured by the controller,
     * insert a meter action before the user space action. */
    struct ofproto *ofproto = &ctx->xin->ofproto->up;
    uint32_t meter_id = ofproto->slowpath_meter_id;

    /* When a meter action is not required, avoid generating a sample action
     * for a 100% sampling rate. */
    bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
    size_t sample_offset = 0, actions_offset = 0;
    if (is_sample) {
        sample_offset = nl_msg_start_nested(ctx->odp_actions,
                                            OVS_ACTION_ATTR_SAMPLE);
        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
                       probability);
        actions_offset = nl_msg_start_nested(ctx->odp_actions,
                                             OVS_SAMPLE_ATTR_ACTIONS);
    }

    if (meter_id != UINT32_MAX) {
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
    }

    odp_port_t odp_port = ofp_port_to_odp_port(
        ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
    uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
    size_t cookie_offset = odp_put_userspace_action(pid, cookie,
                                                    sizeof *cookie,
                                                    tunnel_out_port,
                                                    include_actions,
                                                    ctx->odp_actions);

    if (is_sample) {
        nl_msg_end_nested(ctx->odp_actions, actions_offset);
        nl_msg_end_nested(ctx->odp_actions, sample_offset);
    }

    return cookie_offset;
}

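/* A worked example of the layout composed above (a sketch, not verbatim
 * datapath output): with a 50% probability and a slow-path meter configured,
 * the nesting is
 *
 *     sample(sample=50.0%,
 *            actions(meter(<slowpath_meter_id>), userspace(...)))
 *
 * while with probability == UINT32_MAX and no meter configured, 'is_sample'
 * is false and the userspace action is emitted without the enclosing
 * sample(). */
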
/* If sFlow is not enabled, returns 0 without doing anything.
 *
 * If sFlow is enabled, appends a template "sample" action to the ODP actions
 * in 'ctx'.  This action is a template because some of the information needed
 * to fill it out is not available until flow translation is complete.  In
 * that case, this function returns an offset, which is always nonzero, to
 * pass later to fix_sflow_action() to fill in the rest of the template. */
static size_t
compose_sflow_action(struct xlate_ctx *ctx)
{
    struct dpif_sflow *sflow = ctx->xbridge->sflow;
    if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    struct user_action_cookie cookie;

    memset(&cookie, 0, sizeof cookie);
    cookie.type = USER_ACTION_COOKIE_SFLOW;
    cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
    cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;

    return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
                                 &cookie, ODPP_NONE, true);
}

/* If flow IPFIX is enabled, makes sure the IPFIX flow sample action at the
 * egress point of a tunnel port is just in front of the corresponding
 * output action.  If bridge IPFIX is enabled, this appends an IPFIX sample
 * action to 'ctx->odp_actions'. */
static void
compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
{
    struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
    odp_port_t tunnel_out_port = ODPP_NONE;

    if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
        return;
    }

    /* For the input case, output_odp_port is ODPP_NONE, which is an invalid
     * port number. */
    if (output_odp_port == ODPP_NONE &&
        !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
        return;
    }

    /* For the output case, output_odp_port is valid. */
    if (output_odp_port != ODPP_NONE) {
        if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
            return;
        }
        /* If tunnel sampling is enabled, put an additional option attribute:
         * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT */
        if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
            dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
            tunnel_out_port = output_odp_port;
        }
    }

    struct user_action_cookie cookie;

    memset(&cookie, 0, sizeof cookie);
    cookie.type = USER_ACTION_COOKIE_IPFIX;
    cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
    cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
    cookie.ipfix.output_odp_port = output_odp_port;

    compose_sample_action(ctx,
                          dpif_ipfix_get_bridge_exporter_probability(ipfix),
                          &cookie, tunnel_out_port, false);
}

/* Fixes the "sample" action according to data collected while composing ODP
 * actions, as described in compose_sflow_action().
 *
 * 'user_cookie_offset' must be the offset returned by
 * compose_sflow_action(). */
static void
fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
{
    const struct flow *base = &ctx->base_flow;
    struct user_action_cookie *cookie;

    cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    cookie->sflow.vlan_tci = base->vlans[0].tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (ctx->sflow_n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
            ctx->xbridge->sflow, ctx->sflow_odp_port);
        if (cookie->sflow.output) {
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
        break;
    }
}

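/* Worked examples of the encoding above: a packet flooded to three ports
 * yields cookie->sflow.output == 0x80000000 | 3 == 0x80000003, while a
 * packet dropped before any output keeps 0x40000000 | 256. */
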
static bool
process_special(struct xlate_ctx *ctx, const struct xport *xport)
{
    const struct flow *flow = &ctx->xin->flow;
    struct flow_wildcards *wc = ctx->wc;
    const struct xbridge *xbridge = ctx->xbridge;
    const struct dp_packet *packet = ctx->xin->packet;
    enum slow_path_reason slow;
    bool lacp_may_enable;

    if (!xport) {
        slow = 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        slow = SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
            /* If a POLL was received, immediately send a FINAL back. */
            if (bfd_should_send_packet(xport->bfd)) {
                ofproto_dpif_monitor_port_send_soon(xport->ofport);
            }
        }
        slow = SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_may_enable = lacp_process_packet(xport->xbundle->lacp,
                                                  xport->ofport, packet);
            /* Update LACP status in the bond-slave to avoid packet drops
             * until the LACP state machine is run by the main thread. */
            if (xport->xbundle->bond && lacp_may_enable) {
                bond_slave_set_may_enable(xport->xbundle->bond, xport->ofport,
                                          lacp_may_enable);
            }
        }
        slow = SLOW_LACP;
    } else if ((xbridge->stp || xbridge->rstp) &&
               stp_should_process_flow(flow, wc)) {
        if (packet) {
            xbridge->stp
                ? stp_process_packet(xport, packet)
                : rstp_process_packet(xport, packet);
        }
        slow = SLOW_STP;
    } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
        if (packet) {
            lldp_process_packet(xport->lldp, packet);
        }
        slow = SLOW_LLDP;
    } else {
        slow = 0;
    }

    if (slow) {
        ctx->xout->slow |= slow;
        return true;
    } else {
        return false;
    }
}

static int
tnl_route_lookup_flow(const struct xlate_ctx *ctx,
                      const struct flow *oflow,
                      struct in6_addr *ip, struct in6_addr *src,
                      struct xport **out_port)
{
    char out_dev[IFNAMSIZ];
    struct xbridge *xbridge;
    struct in6_addr gw;
    struct in6_addr dst;

    dst = flow_tnl_dst(&oflow->tunnel);
    if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
        return -ENOENT;
    }

    if (ipv6_addr_is_set(&gw) &&
        (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
        *ip = gw;
    } else {
        *ip = dst;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
        if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
            struct xport *port;

            HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
                if (!strncmp(netdev_get_name(port->netdev), out_dev,
                             IFNAMSIZ)) {
                    *out_port = port;
                    return 0;
                }
            }
        }
    }

    /* If the tunnel IP isn't configured on bridges, then we search all
     * ports. */
    HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
        struct xport *port;

        HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
            if (!strncmp(netdev_get_name(port->netdev),
                         out_dev, IFNAMSIZ)) {
                *out_port = port;
                return 0;
            }
        }
    }
    return -ENOENT;
}

static int
compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
                    struct dp_packet *packet)
{
    struct xbridge *xbridge = out_dev->xbridge;
    ovs_version_t version = ofproto_dpif_get_tables_version(xbridge->ofproto);
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    flow_extract(packet, &flow);
    flow.in_port.ofp_port = out_dev->ofp_port;
    output.port = OFPP_TABLE;
    output.max_len = 0;

    return ofproto_dpif_execute_actions__(xbridge->ofproto, version, &flow,
                                          NULL, &output.ofpact, sizeof output,
                                          ctx->depth, ctx->resubmits, packet);
}

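/* Usage note: the two helpers below build an ND solicitation or an ARP
 * request and feed it to compose_table_xlate(), which re-injects it into
 * 'out_dev''s bridge pipeline via an output to OFPP_TABLE, as if the packet
 * had been received on that port. */
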
static void
tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
                    const struct eth_addr eth_src,
                    struct in6_addr *ipv6_src, struct in6_addr *ipv6_dst)
{
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
    compose_table_xlate(ctx, out_dev, &packet);
    dp_packet_uninit(&packet);
}

static void
tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
                     const struct eth_addr eth_src,
                     ovs_be32 ip_src, ovs_be32 ip_dst)
{
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_arp(&packet, ARP_OP_REQUEST,
                eth_src, eth_addr_zero, true, ip_src, ip_dst);

    compose_table_xlate(ctx, out_dev, &packet);
    dp_packet_uninit(&packet);
}

static void
propagate_tunnel_data_to_flow__(struct flow *dst_flow,
                                const struct flow *src_flow,
                                struct eth_addr dmac, struct eth_addr smac,
                                struct in6_addr s_ip6, ovs_be32 s_ip,
                                bool is_tnl_ipv6, uint8_t nw_proto)
{
    dst_flow->dl_dst = dmac;
    dst_flow->dl_src = smac;

    dst_flow->packet_type = htonl(PT_ETH);
    dst_flow->nw_dst = src_flow->tunnel.ip_dst;
    dst_flow->nw_src = src_flow->tunnel.ip_src;
    dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
    dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;

    dst_flow->nw_frag = 0;   /* Tunnel packets are unfragmented. */
    dst_flow->nw_tos = src_flow->tunnel.ip_tos;
    dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
    dst_flow->tp_dst = src_flow->tunnel.tp_dst;
    dst_flow->tp_src = src_flow->tunnel.tp_src;

    if (is_tnl_ipv6) {
        dst_flow->dl_type = htons(ETH_TYPE_IPV6);
        if (ipv6_mask_is_any(&dst_flow->ipv6_src)
            && !ipv6_mask_is_any(&s_ip6)) {
            dst_flow->ipv6_src = s_ip6;
        }
    } else {
        dst_flow->dl_type = htons(ETH_TYPE_IP);
        if (dst_flow->nw_src == 0 && s_ip) {
            dst_flow->nw_src = s_ip;
        }
    }
    dst_flow->nw_proto = nw_proto;
}

/*
 * Populates the 'flow' and 'base_flow' L3 fields to do the post tunnel push
 * translations.
 */
static void
propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
                              struct eth_addr smac, struct in6_addr s_ip6,
                              ovs_be32 s_ip, bool is_tnl_ipv6,
                              enum ovs_vport_type tnl_type)
{
    struct flow *base_flow, *flow;
    flow = &ctx->xin->flow;
    base_flow = &ctx->base_flow;
    uint8_t nw_proto = 0;

    switch (tnl_type) {
    case OVS_VPORT_TYPE_GRE:
    case OVS_VPORT_TYPE_ERSPAN:
    case OVS_VPORT_TYPE_IP6ERSPAN:
    case OVS_VPORT_TYPE_IP6GRE:
        nw_proto = IPPROTO_GRE;
        break;
    case OVS_VPORT_TYPE_VXLAN:
    case OVS_VPORT_TYPE_GENEVE:
        nw_proto = IPPROTO_UDP;
        break;
    case OVS_VPORT_TYPE_LISP:
    case OVS_VPORT_TYPE_STT:
    case OVS_VPORT_TYPE_UNSPEC:
    case OVS_VPORT_TYPE_NETDEV:
    case OVS_VPORT_TYPE_INTERNAL:
    case __OVS_VPORT_TYPE_MAX:
    default:
        OVS_NOT_REACHED();
    }
    /*
     * Update base_flow first, followed by flow, because dst_flow is modified
     * by the call.
     */
    propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
                                    is_tnl_ipv6, nw_proto);
    propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
                                    is_tnl_ipv6, nw_proto);
}

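/* For illustration: for an IPv4 VXLAN tunnel, the two calls above leave
 * 'flow' and 'base_flow' describing the encapsulated packet: dl_type
 * ETH_TYPE_IP, nw_proto IPPROTO_UDP, nw_src/nw_dst set to the tunnel
 * endpoints and tp_src/tp_dst to the tunnel's UDP ports. */
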
static int
native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
                     const struct flow *flow, odp_port_t tunnel_odp_port,
                     bool truncate)
{
    struct netdev_tnl_build_header_params tnl_params;
    struct ovs_action_push_tnl tnl_push_data;
    struct xport *out_dev = NULL;
    ovs_be32 s_ip = 0, d_ip = 0;
    struct in6_addr s_ip6 = in6addr_any;
    struct in6_addr d_ip6 = in6addr_any;
    struct eth_addr smac;
    struct eth_addr dmac;
    int err;
    char buf_sip6[INET6_ADDRSTRLEN];
    char buf_dip6[INET6_ADDRSTRLEN];

    /* Store sFlow data. */
    uint32_t sflow_n_outputs = ctx->sflow_n_outputs;

    /* Structures to back up the Ethernet and IP data of base_flow. */
    struct flow old_base_flow;
    struct flow old_flow;

    /* Back up flow and base_flow data. */
    memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
    memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);

    if (flow->tunnel.ip_src) {
        in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
    }

    err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
    if (err) {
        xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
        return err;
    }

    xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
                 ipv6_string_mapped(buf_dip6, &d_ip6),
                 netdev_get_name(out_dev->netdev));

    /* Use the MAC address of the bridge port of the peer. */
    err = netdev_get_etheraddr(out_dev->netdev, &smac);
    if (err) {
        xlate_report(ctx, OFT_WARN,
                     "tunnel output device lacks Ethernet address");
        return err;
    }

    d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
    if (d_ip) {
        s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
    }

    err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
    if (err) {
        xlate_report(ctx, OFT_DETAIL,
                     "neighbor cache miss for %s on bridge %s, "
                     "sending %s request",
                     buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
        if (d_ip) {
            tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
        } else {
            tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
        }
        return err;
    }

    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
        ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
                    sizeof entry->tnl_neigh_cache.br_name);
        entry->tnl_neigh_cache.d_ipv6 = d_ip6;
    }

    xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
                 " to "ETH_ADDR_FMT" %s",
                 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
                 ETH_ADDR_ARGS(dmac), buf_dip6);

    netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
    err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
    if (err) {
        return err;
    }
    tnl_push_data.tnl_port = tunnel_odp_port;
    tnl_push_data.out_port = out_dev->odp_port;

    /* After the tunnel header has been added, the MAC and IP data of flow
     * and base_flow need to be set properly, since there is no recirculation
     * any more when sending the packet to the tunnel. */

    propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
                                  s_ip, tnl_params.is_ipv6,
                                  tnl_push_data.tnl_type);

    size_t clone_ofs = 0;
    size_t push_action_size;

    clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
    odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
    push_action_size = ctx->odp_actions->size;

    if (!truncate) {
        const struct dpif_flow_stats *backup_resubmit_stats;
        struct xlate_cache *backup_xcache;
        struct flow_wildcards *backup_wc, wc;
        bool backup_side_effects;
        const struct dp_packet *backup_packet;

        memset(&wc, 0, sizeof wc);
        backup_wc = ctx->wc;
        ctx->wc = &wc;
        ctx->xin->wc = NULL;
        backup_resubmit_stats = ctx->xin->resubmit_stats;
        backup_xcache = ctx->xin->xcache;
        backup_side_effects = ctx->xin->allow_side_effects;
        backup_packet = ctx->xin->packet;

        ctx->xin->resubmit_stats = NULL;
        ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
        ctx->xin->allow_side_effects = false;
        ctx->xin->packet = NULL;

        /* Push the cache entry for the tunnel first. */
        struct xc_entry *entry;
        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
        entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
        entry->tunnel_hdr.operation = ADD;

        patch_port_output(ctx, xport, out_dev);

        /* Similar to the stats update in revalidation, the xcache entries
         * populated by the previous translation are used to update the
         * stats correctly. */
        if (backup_resubmit_stats) {
            struct dpif_flow_stats stats = *backup_resubmit_stats;
            xlate_push_stats(ctx->xin->xcache, &stats, false);
        }
        xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);

        if (ctx->odp_actions->size > push_action_size) {
            nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
        } else {
            nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
        }

        /* Restore context status. */
        ctx->xin->resubmit_stats = backup_resubmit_stats;
        xlate_cache_delete(ctx->xin->xcache);
        ctx->xin->xcache = backup_xcache;
        ctx->xin->allow_side_effects = backup_side_effects;
        ctx->xin->packet = backup_packet;
        ctx->wc = backup_wc;
    } else {
        /* In order to maintain accurate stats, use recirculation for
         * native tunneling. */
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
        nl_msg_end_nested(ctx->odp_actions, clone_ofs);
    }

    /* Restore the flows after the translation. */
    memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
    memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);

    /* Restore sFlow data. */
    ctx->sflow_n_outputs = sflow_n_outputs;

    return 0;
}

static void
xlate_commit_actions(struct xlate_ctx *ctx)
{
    bool use_masked = ctx->xbridge->support.masked_set_action;

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->odp_actions, ctx->wc,
                                          use_masked, ctx->pending_encap,
                                          ctx->pending_decap, ctx->encap_data);
    ctx->pending_encap = false;
    ctx->pending_decap = false;
    ofpbuf_delete(ctx->encap_data);
    ctx->encap_data = NULL;
}

static void
clear_conntrack(struct xlate_ctx *ctx)
{
    ctx->conntracked = false;
    flow_clear_conntrack(&ctx->xin->flow);
}

static bool
xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow,
                        const struct xport *xport_out)
{
    const struct xport *xport_in;

    if (!xport_out) {
        return false;
    }

    xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);

    return (xport_in && xport_in->xbundle && xport_out->xbundle &&
            xport_in->xbundle->protected && xport_out->xbundle->protected);
}

/* Handles a packet that is sent from one bridge to another bridge.
 *
 * The bridges are internally connected, either with patch ports or with
 * tunnel ports.
 *
 * The output action to another bridge causes translation to continue within
 * the next bridge.  This process can be recursive; the next bridge can
 * output to yet another bridge.
 *
 * The translated actions from the second bridge onwards are enclosed within
 * the clone action, so that any modification to the packet will not be
 * visible to the remaining actions of the originating bridge.
 */
static void
patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
                  struct xport *out_dev)
{
    struct flow *flow = &ctx->xin->flow;
    struct flow old_flow = ctx->xin->flow;
    struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
    bool old_conntrack = ctx->conntracked;
    bool old_was_mpls = ctx->was_mpls;
    ovs_version_t old_version = ctx->xin->tables_version;
    struct ofpbuf old_stack = ctx->stack;
    uint8_t new_stack[1024];
    struct ofpbuf old_action_set = ctx->action_set;
    struct ovs_list *old_trace = ctx->xin->trace;
    uint64_t actset_stub[1024 / 8];

    ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
    ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
    flow->in_port.ofp_port = out_dev->ofp_port;
    flow->metadata = htonll(0);
    memset(&flow->tunnel, 0, sizeof flow->tunnel);
    memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
    flow->tunnel.metadata.tab =
        ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
    ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
    memset(flow->regs, 0, sizeof flow->regs);
    flow->actset_output = OFPP_UNSET;
    clear_conntrack(ctx);
    ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
                                   out_dev->xbridge->name);
    mirror_mask_t old_mirrors = ctx->mirrors;
    bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
    if (independent_mirrors) {
        ctx->mirrors = 0;
    }
    ctx->xbridge = out_dev->xbridge;

    /* The bridge is now known, so obtain its table version. */
    ctx->xin->tables_version
        = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);

    if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
        if (xport_stp_forward_state(out_dev) &&
            xport_rstp_forward_state(out_dev)) {
            xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
                               false, true, clone_xlate_actions);
            if (!ctx->freezing) {
                xlate_action_set(ctx);
            }
            if (ctx->freezing) {
                finish_freezing(ctx);
            }
        } else {
            /* Forwarding is disabled by STP and RSTP.  Let OFPP_NORMAL and
             * the learning action look at the packet, then drop it. */
            struct flow old_base_flow = ctx->base_flow;
            size_t old_size = ctx->odp_actions->size;
            mirror_mask_t old_mirrors2 = ctx->mirrors;

            xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
                               false, true, clone_xlate_actions);
            ctx->mirrors = old_mirrors2;
            ctx->base_flow = old_base_flow;
            ctx->odp_actions->size = old_size;

            /* Undo changes that may have been done for freezing. */
            ctx_cancel_freeze(ctx);
        }
    }

    ctx->xin->trace = old_trace;
    if (independent_mirrors) {
        ctx->mirrors = old_mirrors;
    }
    ctx->xin->flow = old_flow;
    ctx->xbridge = in_dev->xbridge;
    ofpbuf_uninit(&ctx->action_set);
    ctx->action_set = old_action_set;
    ofpbuf_uninit(&ctx->stack);
    ctx->stack = old_stack;

    /* Restore the calling bridge's lookup version. */
    ctx->xin->tables_version = old_version;

    /* Restore the calling bridge's tunneling information. */
    ctx->wc->masks.tunnel = old_flow_tnl_wc;

    /* The out bridge popping MPLS should have no effect on the original
     * bridge. */
    ctx->was_mpls = old_was_mpls;

    /* The out bridge's conntrack execution should have no effect on the
     * original bridge. */
    ctx->conntracked = old_conntrack;

    /* The fact that the out bridge exits (for any reason) does not mean
     * that the original bridge should exit.  Specifically, if the out
     * bridge freezes translation, the original bridge must continue
     * processing with the original, not the frozen packet! */
    ctx->exit = false;

    /* Out bridge errors do not propagate back. */
    ctx->error = XLATE_OK;

    if (ctx->xin->resubmit_stats) {
        netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
        netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
        if (out_dev->bfd) {
            bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
        }
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
        entry->dev.tx = netdev_ref(in_dev->netdev);
        entry->dev.rx = netdev_ref(out_dev->netdev);
        entry->dev.bfd = bfd_ref(out_dev->bfd);
    }
}

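/* For illustration (hypothetical topology): if br0 outputs to a patch port
 * whose peer belongs to br1, the actions translated for br1's pipeline are
 * enclosed in a datapath clone(...) inside br0's action list, so actions
 * that br0 applies afterwards still see the unmodified packet. */
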
static bool
check_output_prerequisites(struct xlate_ctx *ctx,
                           const struct xport *xport,
                           struct flow *flow,
                           bool check_stp)
{
    struct flow_wildcards *wc = ctx->wc;

    if (!xport) {
        xlate_report(ctx, OFT_WARN, "Nonexistent output port");
        return false;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
        return false;
    } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
        xlate_report(ctx, OFT_WARN,
                     "Mirror truncate to ODPP_NONE, skipping output");
        return false;
    } else if (xlate_flow_is_protected(ctx, flow, xport)) {
        xlate_report(ctx, OFT_WARN,
                     "Flow is between protected ports, skipping output.");
        return false;
    } else if (check_stp) {
        if (is_stp(&ctx->base_flow)) {
            if (!xport_stp_should_forward_bpdu(xport) &&
                !xport_rstp_should_manage_bpdu(xport)) {
                if (ctx->xbridge->stp != NULL) {
                    xlate_report(ctx, OFT_WARN,
                                 "STP not in listening state, "
                                 "skipping bpdu output");
                } else if (ctx->xbridge->rstp != NULL) {
                    xlate_report(ctx, OFT_WARN,
                                 "RSTP not managing BPDU in this state, "
                                 "skipping bpdu output");
                }
                return false;
            }
        } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow,
                                                          wc))
                   || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
                                                             wc))) {
            /* Pass; STP should not block link health detection. */
        } else if (!xport_stp_forward_state(xport) ||
                   !xport_rstp_forward_state(xport)) {
            if (ctx->xbridge->stp != NULL) {
                xlate_report(ctx, OFT_WARN,
                             "STP not in forwarding state, skipping output");
            } else if (ctx->xbridge->rstp != NULL) {
                xlate_report(ctx, OFT_WARN,
                             "RSTP not in forwarding state, skipping output");
            }
            return false;
        }
    }

    if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
        flow->packet_type != htonl(PT_ETH)) {
        xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
                     "through legacy L2 port. Dropping packet.");
        return false;
    }

    return true;
}

/* Verifies that the destination address of the Neighbor Advertisement
 * message stored in 'flow' is correct.  It should be either
 * FF02::1:FFXX:XXXX, where XX:XXXX stands for the last 24 bits of
 * 'ipv6_addr', or it should match 'ipv6_addr' itself. */
static bool
is_nd_dst_correct(const struct flow *flow, const struct in6_addr *ipv6_addr)
{
    const uint8_t *flow_ipv6_addr = (uint8_t *) &flow->ipv6_dst;
    const uint8_t *addr = (uint8_t *) ipv6_addr;

    return (IN6_IS_ADDR_MC_LINKLOCAL(&flow->ipv6_dst) &&
            flow_ipv6_addr[11] == 0x01 &&
            flow_ipv6_addr[12] == 0xff &&
            flow_ipv6_addr[13] == addr[13] &&
            flow_ipv6_addr[14] == addr[14] &&
            flow_ipv6_addr[15] == addr[15]) ||
           IN6_ARE_ADDR_EQUAL(&flow->ipv6_dst, ipv6_addr);
}

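/* For illustration: the FF02::1:FFXX:XXXX form above is the solicited-node
 * multicast address.  For the (example) target 2001:db8::aa:1234 the
 * expected destination is ff02::1:ffaa:1234, i.e. the fixed prefix plus the
 * last 24 bits of the target address. */
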
static bool
is_neighbor_reply_matched(const struct flow *flow, struct in6_addr *ip_addr)
{
    return ((IN6_IS_ADDR_V4MAPPED(ip_addr) &&
             flow->dl_type == htons(ETH_TYPE_ARP) &&
             in6_addr_get_mapped_ipv4(ip_addr) == flow->nw_dst) ||
            (!IN6_IS_ADDR_V4MAPPED(ip_addr) &&
             is_nd_dst_correct(flow, ip_addr)));
}

/* Verifies whether the ARP reply or Neighbor Advertisement represented by
 * 'flow' addresses the 'xbridge' of 'ctx'.  Returns true if the ARP TA or
 * the neighbor discovery destination is in the list of configured IP
 * addresses of the bridge.  Otherwise, it returns false. */
static bool
is_neighbor_reply_correct(const struct xlate_ctx *ctx, const struct flow *flow)
{
    bool ret = false;
    int i;
    struct xbridge_addr *xbridge_addr = xbridge_addr_ref(ctx->xbridge->addr);

    /* Verify if 'nw_dst' of ARP or 'ipv6_dst' of ICMPv6 is in the list. */
    for (i = 0; xbridge_addr && i < xbridge_addr->n_addr; i++) {
        struct in6_addr *ip_addr = &xbridge_addr->addr[i];
        if (is_neighbor_reply_matched(flow, ip_addr)) {
            /* Found a match. */
            ret = true;
            break;
        }
    }

    xbridge_addr_unref(xbridge_addr);

    /* If not found in the bridge's IPs, search in its ports. */
    if (!ret) {
        struct in6_addr *ip_addr, *mask;
        struct xport *port;
        int error, n_in6;

        HMAP_FOR_EACH (port, ofp_node, &ctx->xbridge->xports) {
            error = netdev_get_addr_list(port->netdev, &ip_addr,
                                         &mask, &n_in6);
            if (!error) {
                ret = is_neighbor_reply_matched(flow, ip_addr);
                free(ip_addr);
                free(mask);
                if (ret) {
                    /* Found a match. */
                    break;
                }
            }
        }
    }
    return ret;
}

static bool
terminate_native_tunnel(struct xlate_ctx *ctx, struct flow *flow,
                        struct flow_wildcards *wc, odp_port_t *tnl_port)
{
    *tnl_port = ODPP_NONE;

    /* XXX: Write a better filter for tunnel ports.  We can use the in_port
     * in the tunnel-port flow to avoid these checks completely. */
    if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
        *tnl_port = tnl_port_map_lookup(flow, wc);

        /* If no tunnel port was found and it's about an ARP or ICMPv6
         * packet, do tunnel neighbor snooping. */
        if (*tnl_port == ODPP_NONE &&
            (flow->dl_type == htons(ETH_TYPE_ARP) ||
             flow->nw_proto == IPPROTO_ICMPV6) &&
            is_neighbor_reply_correct(ctx, flow)) {
            tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
        }
    }

    return *tnl_port != ODPP_NONE;
}

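/* Usage note: at the call site in compose_output_action__() below, a true
 * return causes an OVS_ACTION_ATTR_TUNNEL_POP on '*tnl_port' to be emitted
 * instead of a plain output, so the packet is decapsulated and received on
 * the native tunnel port. */
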
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        const struct xlate_bond_recirc *xr, bool check_stp,
                        bool is_last_action OVS_UNUSED, bool truncate)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = ctx->wc;
    struct flow *flow = &ctx->xin->flow;
    struct flow_tnl flow_tnl;
    union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port, odp_tnl_port;
    bool is_native_tunnel = false;
    uint8_t dscp;
    struct eth_addr flow_dl_dst = flow->dl_dst;
    struct eth_addr flow_dl_src = flow->dl_src;
    ovs_be32 flow_packet_type = flow->packet_type;
    ovs_be16 flow_dl_type = flow->dl_type;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
    memset(&flow_tnl, 0, sizeof flow_tnl);

    if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
        return;
    }

    if (flow->packet_type == htonl(PT_ETH)) {
        /* Strip Ethernet header for legacy L3 port. */
        if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
            flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
                                               ntohs(flow->dl_type));
        }
    }

    if (xport->peer) {
        if (truncate) {
            xlate_report_error(ctx, "Cannot truncate output to patch port");
        }
        patch_port_output(ctx, xport, xport->peer);
        return;
    }

    memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
    flow_nw_tos = flow->nw_tos;

    if (count_skb_priorities(xport)) {
        memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
        if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
            wc->masks.nw_tos |= IP_DSCP_MASK;
            flow->nw_tos &= ~IP_DSCP_MASK;
            flow->nw_tos |= dscp;
        }
    }

    if (xport->is_tunnel) {
        struct in6_addr dst;
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        dst = flow_tnl_dst(&flow->tunnel);
        if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
            xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
            entry->dev.tx = netdev_ref(xport->netdev);
        }
        out_port = odp_port;
        if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
            xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
            is_native_tunnel = true;
        } else {
            const char *tnl_type;

            xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
            tnl_type = tnl_port_get_type(xport->ofport);
            commit_odp_tunnel_action(flow, &ctx->base_flow,
                                     ctx->odp_actions, tnl_type);
            flow->tunnel = flow_tnl; /* Restore tunnel metadata. */
        }
    } else {
        odp_port = xport->odp_port;
        out_port = odp_port;
    }

    if (out_port != ODPP_NONE) {
        /* Commit accumulated flow updates before output. */
        xlate_commit_actions(ctx);

        if (xr) {
            /* Recirculate the packet. */
            struct ovs_action_hash *act_hash;

            /* Hash action. */
            enum ovs_hash_alg hash_alg = xr->hash_alg;
            if (hash_alg > ctx->xbridge->support.max_hash_alg) {
                /* Algorithm supported by all datapaths. */
                hash_alg = OVS_HASH_ALG_L4;
            }
            act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                OVS_ACTION_ATTR_HASH,
                                                sizeof *act_hash);
            act_hash->hash_alg = hash_alg;
            act_hash->hash_basis = xr->hash_basis;

            /* Recirc action. */
            nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
                           xr->recirc_id);
        } else if (is_native_tunnel) {
            /* Output to native tunnel port. */
            native_tunnel_output(ctx, xport, flow, odp_port, truncate);
            flow->tunnel = flow_tnl; /* Restore tunnel metadata. */

        } else if (terminate_native_tunnel(ctx, flow, wc,
                                           &odp_tnl_port)) {
            /* Intercept packet to be received on native tunnel port. */
            nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
                                odp_tnl_port);

        } else {
            /* Tunnel push-pop action is not compatible with
             * IPFIX action. */
            compose_ipfix_action(ctx, out_port);

            /* Handle truncation of the mirrored packet. */
            if (ctx->mirror_snaplen > 0 &&
                ctx->mirror_snaplen < UINT16_MAX) {
                struct ovs_action_trunc *trunc;

                trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                 OVS_ACTION_ATTR_TRUNC,
                                                 sizeof *trunc);
                trunc->max_len = ctx->mirror_snaplen;
                if (!ctx->xbridge->support.trunc) {
                    ctx->xout->slow |= SLOW_ACTION;
                }
            }

            nl_msg_put_odp_port(ctx->odp_actions,
                                OVS_ACTION_ATTR_OUTPUT,
                                out_port);
        }

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->nf_output_iface = ofp_port;
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        mirror_packet(ctx, xport->xbundle,
                      xbundle_mirror_dst(xport->xbundle->xbridge,
                                         xport->xbundle));
    }

out:
    /* Restore flow. */
    memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
    flow->nw_tos = flow_nw_tos;
    flow->dl_dst = flow_dl_dst;
    flow->dl_src = flow_dl_src;
    flow->packet_type = flow_packet_type;
    flow->dl_type = flow_dl_type;
}

static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                      const struct xlate_bond_recirc *xr,
                      bool is_last_action, bool truncate)
{
    compose_output_action__(ctx, ofp_port, xr, true,
                            is_last_action, truncate);
}

static void
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
                  bool deepens, bool is_last_action,
                  xlate_actions_handler *actions_xlator)
{
    struct rule_dpif *old_rule = ctx->rule;
    ovs_be64 old_cookie = ctx->rule_cookie;
    const struct rule_actions *actions;

    if (ctx->xin->resubmit_stats) {
        rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats, false);
    }

    ctx->resubmits++;

    ctx->depth += deepens;
    ctx->rule = rule;
    ctx->rule_cookie = rule->up.flow_cookie;
    actions = rule_get_actions(&rule->up);
    actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
                   is_last_action, false);
    ctx->rule_cookie = old_cookie;
    ctx->rule = old_rule;
    ctx->depth -= deepens;
}

static bool
xlate_resubmit_resource_check(struct xlate_ctx *ctx)
{
    if (ctx->depth >= MAX_DEPTH) {
        xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
        ctx->error = XLATE_RECURSION_TOO_DEEP;
    } else if (ctx->resubmits >= MAX_RESUBMITS) {
        xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
        ctx->error = XLATE_TOO_MANY_RESUBMITS;
    } else if (ctx->odp_actions->size > UINT16_MAX) {
        xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
        /* NOT an error, as we'll be slow-pathing the flow in this case? */
        ctx->exit = true; /* XXX: translation still terminated! */
    } else if (ctx->stack.size >= 65536) {
        xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
        ctx->error = XLATE_STACK_TOO_DEEP;
    } else {
        return true;
    }

    return false;
}

static void
tuple_swap_flow(struct flow *flow, bool ipv4)
{
    uint8_t nw_proto = flow->nw_proto;
    flow->nw_proto = flow->ct_nw_proto;
    flow->ct_nw_proto = nw_proto;

    if (ipv4) {
        ovs_be32 nw_src = flow->nw_src;
        flow->nw_src = flow->ct_nw_src;
        flow->ct_nw_src = nw_src;

        ovs_be32 nw_dst = flow->nw_dst;
        flow->nw_dst = flow->ct_nw_dst;
        flow->ct_nw_dst = nw_dst;
    } else {
        struct in6_addr ipv6_src = flow->ipv6_src;
        flow->ipv6_src = flow->ct_ipv6_src;
        flow->ct_ipv6_src = ipv6_src;

        struct in6_addr ipv6_dst = flow->ipv6_dst;
        flow->ipv6_dst = flow->ct_ipv6_dst;
        flow->ct_ipv6_dst = ipv6_dst;
    }

    ovs_be16 tp_src = flow->tp_src;
    flow->tp_src = flow->ct_tp_src;
    flow->ct_tp_src = tp_src;

    ovs_be16 tp_dst = flow->tp_dst;
    flow->tp_dst = flow->ct_tp_dst;
    flow->ct_tp_dst = tp_dst;
}

static void
tuple_swap(struct flow *flow, struct flow_wildcards *wc)
{
    bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));

    tuple_swap_flow(flow, ipv4);
    tuple_swap_flow(&wc->masks, ipv4);
}

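/* For illustration (hypothetical addresses): if a NATed connection was
 * committed with original direction 10.0.0.1:1234 -> 192.168.0.2:80, a
 * reply packet's own 5-tuple differs from its conntrack original tuple;
 * tuple_swap() exchanges nw_src/nw_dst, tp_src/tp_dst and nw_proto with
 * their ct_* counterparts (in both the flow and the wildcards) so that a
 * subsequent lookup sees the original-direction tuple. */
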
static void
xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                   bool may_packet_in, bool honor_table_miss,
                   bool with_ct_orig, bool is_last_action,
                   xlate_actions_handler *xlator)
{
    /* Check if we need to recirculate before matching in a table. */
    if (ctx->was_mpls) {
        ctx_trigger_freeze(ctx);
        return;
    }
    if (xlate_resubmit_resource_check(ctx)) {
        uint8_t old_table_id = ctx->table_id;
        struct rule_dpif *rule;

        ctx->table_id = table_id;

        /* Swap packet fields with CT 5-tuple if requested. */
        if (with_ct_orig) {
            /* Do not swap if there is no CT tuple, or if key is not IP. */
            if (ctx->xin->flow.ct_nw_proto == 0 ||
                !is_ip_any(&ctx->xin->flow)) {
                xlate_report_error(ctx,
                    "resubmit(ct) with non-tracked or non-IP packet!");
                ctx->table_id = old_table_id;
                return;
            }
            tuple_swap(&ctx->xin->flow, ctx->wc);
        }
        rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
                                           ctx->xin->tables_version,
                                           &ctx->xin->flow, ctx->wc,
                                           ctx->xin->resubmit_stats,
                                           &ctx->table_id, in_port,
                                           may_packet_in, honor_table_miss,
                                           ctx->xin->xcache);
        /* Swap back. */
        if (with_ct_orig) {
            tuple_swap(&ctx->xin->flow, ctx->wc);
        }

        if (rule) {
            /* Fill in the cache entry here instead of xlate_recursively
             * to make the reference counting more explicit.  We take a
             * reference in the lookups above if we are going to cache the
             * rule. */
            if (ctx->xin->xcache) {
                struct xc_entry *entry;

                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
                entry->rule = rule;
                ofproto_rule_ref(&rule->up);
            }

            struct ovs_list *old_trace = ctx->xin->trace;
            xlate_report_table(ctx, rule, table_id);
            xlate_recursively(ctx, rule, table_id <= old_table_id,
                              is_last_action, xlator);
            ctx->xin->trace = old_trace;
        }

        ctx->table_id = old_table_id;
        return;
    }
}

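/* Usage sketch at the OpenFlow layer: an action such as resubmit(,2) enters
 * this function with table_id == 2 and with_ct_orig == false, whereas
 * resubmit(,2,ct) sets with_ct_orig == true so that the lookup in table 2
 * is performed against the conntrack original-direction 5-tuple (see
 * tuple_swap() above). */
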
/* Consumes the group reference, which is only taken if xcache exists. */
static void
xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
                  struct ofputil_bucket *bucket)
{
    if (ctx->xin->resubmit_stats) {
        group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
        entry->group.group = group;
        entry->group.bucket = bucket;
    }
}

static void
xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
                   bool is_last_action)
{
    struct ovs_list *old_trace = ctx->xin->trace;
    if (OVS_UNLIKELY(ctx->xin->trace)) {
        char *s = xasprintf("bucket %"PRIu32, bucket->bucket_id);
        ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_BUCKET,
                                          s)->subs;
        free(s);
    }

    uint64_t action_list_stub[1024 / 8];
    struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
    struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
                                                        bucket->ofpacts_len);
    struct flow old_flow = ctx->xin->flow;
    bool old_was_mpls = ctx->was_mpls;

    ofpacts_execute_action_set(&action_list, &action_set);
    ctx->depth++;
    do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action,
                     true);
    ctx->depth--;

    ofpbuf_uninit(&action_list);

    /* Check if we need to freeze. */
    if (ctx->freezing) {
        finish_freezing(ctx);
    }

    /* Roll back flow to previous state.
     * This is equivalent to cloning the packet for each bucket.
     *
     * As a side effect any subsequently applied actions will
     * also effectively be applied to a clone of the packet taken
     * just before applying the all or indirect group.
     *
     * Note that group buckets are action sets, hence they cannot modify the
     * main action set.  Also any stack actions are ignored when executing an
     * action set, so group buckets cannot directly change the stack either.
     * However, we do allow resubmit actions in group buckets, which could
     * recursively execute actions that do modify the action set or change the
     * stack.  The controller must be careful about what it does to the
     * action_set and stack in the tables resubmitted to from group buckets. */
    ctx->xin->flow = old_flow;

    /* The group bucket popping MPLS should have no effect after bucket
     * execution. */
    ctx->was_mpls = old_was_mpls;

    /* The fact that the group bucket exits (for any reason) does not mean
     * that the translation after the group action should exit.  Specifically,
     * if the group bucket freezes translation, the actions after the group
     * action must continue processing with the original, not the frozen
     * packet! */
    ctx->exit = false;

    /* A context error in a bucket should not impact the processing of other
     * buckets or actions.  This is similar to cloning a packet for group
     * buckets.  There is no need to restore the error to its old value,
     * because we actually processed the group action, which can happen only
     * when there is no previous context error.
     *
     * Exceptions to the above are errors that represent system limits,
     * protecting translation from running too long or occupying too much
     * space.  These errors should not be masked.  XLATE_RECURSION_TOO_DEEP,
     * XLATE_TOO_MANY_RESUBMITS and XLATE_STACK_TOO_DEEP fall in this
     * category. */
    if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
        ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
        /* Reset the error and continue processing other buckets. */
        ctx->error = XLATE_OK;
    }

    ctx->xin->trace = old_trace;
}

a04e5888
BP
4525static struct ofputil_bucket *
4526pick_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
dd8cd4b4 4527{
a04e5888 4528 return group_first_live_bucket(ctx, group, 0);
dd8cd4b4
SH
4529}
4530
a04e5888
BP
4531static struct ofputil_bucket *
4532pick_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
fe7e5749 4533{
a04e5888
BP
4534 flow_mask_hash_fields(&ctx->xin->flow, ctx->wc,
4535 NX_HASH_FIELDS_SYMMETRIC_L4);
4536 return group_best_live_bucket(ctx, group,
4537 flow_hash_symmetric_l4(&ctx->xin->flow, 0));
fe7e5749
SH
4538}
4539
a04e5888
BP
4540static struct ofputil_bucket *
4541pick_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
0c4b9393 4542{
07a3cd5c
BP
4543 const struct field_array *fields = &group->up.props.fields;
4544 const uint8_t *mask_values = fields->values;
4545 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
0c4b9393 4546
07a3cd5c 4547 size_t i;
e8dba719
JR
4548 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4549 const struct mf_field *mf = mf_from_id(i);
0c4b9393 4550
5bcd4754 4551 /* Skip fields for which prerequisites are not met. */
e8dba719
JR
4552 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4553 /* Skip the mask bytes for this field. */
4554 mask_values += mf->n_bytes;
4555 continue;
4556 }
0c4b9393 4557
e8dba719
JR
4558 union mf_value value;
4559 union mf_value mask;
0c4b9393 4560
e8dba719
JR
4561 mf_get_value(mf, &ctx->xin->flow, &value);
4562 /* Mask the value. */
4563 for (int j = 0; j < mf->n_bytes; j++) {
4564 mask.b[j] = *mask_values++;
4565 value.b[j] &= mask.b[j];
4566 }
4567 basis = hash_bytes(&value, mf->n_bytes, basis);
1cb20095 4568
e8dba719
JR
4569 /* For tunnels, hash in whether the field is present. */
4570 if (mf_is_tun_metadata(mf)) {
4571 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
0c4b9393 4572 }
e8dba719
JR
4573
4574 mf_mask_field_masked(mf, &mask, ctx->wc);
0c4b9393
SH
4575 }
4576
a04e5888 4577 return group_best_live_bucket(ctx, group, basis);
0c4b9393
SH
4578}
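/* Worked example (illustrative, not part of the blamed source): for a group
 * created with selection_method=hash,fields(ip_src,ip_dst), only the masked
 * ip_src and ip_dst bytes are folded into 'basis' above, so every packet of
 * a given address pair maps to the same bucket, while changes in other
 * header fields do not move flows between buckets. */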
4579
a04e5888
BP
4580static struct ofputil_bucket *
4581pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
53cc166a 4582{
2e3fd24c
JS
4583 uint32_t dp_hash = ctx->xin->flow.dp_hash;
4584
53cc166a
JR
4585 /* dp_hash value 0 is special: it means that the dp_hash has not been
4586 * computed, as all computed dp_hash values are non-zero. Therefore a
4587 * comparison with zero can be used to decide whether the dp_hash value
4588 * is valid without masking the dp_hash field. */
2e3fd24c
JS
4589 if (!dp_hash) {
4590 enum ovs_hash_alg hash_alg = group->hash_alg;
4591 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4592 /* Fall back to the algorithm supported by all datapaths. */
4593 hash_alg = OVS_HASH_ALG_L4;
4594 }
4595 ctx_trigger_recirculate_with_hash(ctx, hash_alg, group->hash_basis);
a04e5888 4596 return NULL;
53cc166a 4597 } else {
2e3fd24c
JS
4598 uint32_t hash_mask = group->hash_mask;
4599 ctx->wc->masks.dp_hash |= hash_mask;
4600
4601 /* Starting from the original masked dp_hash value, iterate over the
4602 * hash mapping table to find the first live bucket. As the buckets
4603 * are quasi-randomly spread over the hash values, this maintains
4604 * a distribution according to bucket weights even when some buckets
4605 * are non-live. */
4606 for (int i = 0; i <= hash_mask; i++) {
4607 struct ofputil_bucket *b =
4608 group->hash_map[(dp_hash + i) & hash_mask];
4609 if (bucket_is_alive(ctx, b, 0)) {
4610 return b;
4611 }
53cc166a 4612 }
2e3fd24c 4613
a04e5888 4614 return NULL;
53cc166a
JR
4615 }
4616}
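/* Worked example (illustrative, not part of the blamed source): with
 * hash_mask == 0x7 and dp_hash == 0x5, the loop above probes hash_map[5],
 * [6], [7], [0], ... until it finds a live bucket.  Slots are assigned to
 * buckets roughly in proportion to their weights (e.g. weights 3:1 would
 * typically fill six and two of the eight slots), so falling through to the
 * next slot keeps the weighted distribution even when a bucket is down. */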
4617
a04e5888
BP
4618static struct ofputil_bucket *
4619pick_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
7565c3e4 4620{
e12ec36b
SH
4621 /* Select groups may access flow keys beyond L2 in order to
4622 * select a bucket. Recirculate as appropriate to make this possible.
4623 */
4624 if (ctx->was_mpls) {
4625 ctx_trigger_freeze(ctx);
991f1f63 4626 return NULL;
e12ec36b
SH
4627 }
4628
2e3fd24c
JS
4629 switch (group->selection_method) {
4630 case SEL_METHOD_DEFAULT:
a04e5888 4631 return pick_default_select_group(ctx, group);
2e3fd24c
JS
4632 break;
4633 case SEL_METHOD_HASH:
a04e5888 4634 return pick_hash_fields_select_group(ctx, group);
2e3fd24c
JS
4635 break;
4636 case SEL_METHOD_DP_HASH:
a04e5888 4637 return pick_dp_hash_select_group(ctx, group);
2e3fd24c
JS
4638 break;
4639 default:
4640 /* Parsing of groups ensures this never happens. */
7565c3e4
SH
4641 OVS_NOT_REACHED();
4642 }
2e3fd24c
JS
4643
4644 return NULL;
7565c3e4
SH
4645}
4646
f4fb341b 4647static void
feee58b9
AZ
4648xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4649 bool is_last_action)
f4fb341b 4650{
a04e5888 4651 if (group->up.type == OFPGT11_ALL || group->up.type == OFPGT11_INDIRECT) {
03121ac4 4652 struct ovs_list *last_bucket = group->up.buckets.prev;
a04e5888
BP
4653 struct ofputil_bucket *bucket;
4654 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
4655 bool is_last_bucket = &bucket->list_node == last_bucket;
4656 xlate_group_bucket(ctx, bucket, is_last_action && is_last_bucket);
4657 }
4658 xlate_group_stats(ctx, group, NULL);
4659 } else {
4660 struct ofputil_bucket *bucket;
4661 if (group->up.type == OFPGT11_SELECT) {
4662 bucket = pick_select_group(ctx, group);
4663 } else if (group->up.type == OFPGT11_FF) {
4664 bucket = pick_ff_group(ctx, group);
4665 } else {
4666 OVS_NOT_REACHED();
4667 }
fac4786a 4668
a04e5888 4669 if (bucket) {
fac4786a
BP
4670 xlate_report(ctx, OFT_DETAIL, "using bucket %"PRIu32,
4671 bucket->bucket_id);
a04e5888
BP
4672 xlate_group_bucket(ctx, bucket, is_last_action);
4673 xlate_group_stats(ctx, group, bucket);
fac4786a
BP
4674 } else {
4675 xlate_report(ctx, OFT_DETAIL, "no live bucket");
4676 if (ctx->xin->xcache) {
4677 ofproto_group_unref(&group->up);
4678 }
a04e5888 4679 }
f4fb341b 4680 }
f4fb341b
SH
4681}
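/* Worked example (illustrative, not part of the blamed source): for an
 * OFPGT11_ALL group with buckets b1 and b2, both buckets are translated in
 * order and only b2 inherits the caller's 'is_last_action'.  For an
 * OFPGT11_SELECT group, one live bucket is picked (by flow hash or dp_hash,
 * see pick_select_group() above) and only that bucket's actions are
 * translated. */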
4682
4683static bool
feee58b9
AZ
4684xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4685 bool is_last_action)
f4fb341b 4686{
0eb48fe1 4687 if (xlate_resubmit_resource_check(ctx)) {
f4fb341b 4688 struct group_dpif *group;
f4fb341b 4689
76973237
JR
4690 /* Take ref only if xcache exists. */
4691 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1f4a8933 4692 ctx->xin->tables_version, ctx->xin->xcache);
db88b35c
JR
4693 if (!group) {
4694 /* XXX: Should set ctx->error ? */
2d9b49dd
BP
4695 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4696 group_id);
f4fb341b
SH
4697 return true;
4698 }
feee58b9 4699 xlate_group_action__(ctx, group, is_last_action);
f4fb341b
SH
4700 }
4701
4702 return false;
4703}
4704
9583bc14
EJ
4705static void
4706xlate_ofpact_resubmit(struct xlate_ctx *ctx,
feee58b9
AZ
4707 const struct ofpact_resubmit *resubmit,
4708 bool is_last_action)
9583bc14 4709{
4e022ec0 4710 ofp_port_t in_port;
9583bc14 4711 uint8_t table_id;
adcf00ba
AZ
4712 bool may_packet_in = false;
4713 bool honor_table_miss = false;
4714
4715 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4716 /* Still allow missed packets to be sent to the controller
4717 * if resubmitting from an internal table. */
4718 may_packet_in = true;
4719 honor_table_miss = true;
4720 }
9583bc14
EJ
4721
4722 in_port = resubmit->in_port;
4723 if (in_port == OFPP_IN_PORT) {
4e022ec0 4724 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
4725 }
4726
4727 table_id = resubmit->table_id;
4728 if (table_id == 255) {
4729 table_id = ctx->table_id;
4730 }
4731
adcf00ba 4732 xlate_table_action(ctx, in_port, table_id, may_packet_in,
feee58b9 4733 honor_table_miss, resubmit->with_ct_orig,
96c3a6e5 4734 is_last_action, do_xlate_actions);
9583bc14
EJ
4735}
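/* Worked example (illustrative, not part of the blamed source):
 * "resubmit(,2)" looks the flow up in table 2 with in_port unchanged,
 * while "resubmit:LOCAL" (encoded with table_id 255) re-runs the current
 * table with in_port temporarily set to LOCAL for the lookup. */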
4736
4737static void
feee58b9
AZ
4738flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4739 bool all, bool is_last_action)
9583bc14 4740{
feee58b9
AZ
4741 if (!xport) {
4742 return;
4743 }
4744
4745 if (all) {
4746 compose_output_action__(ctx, xport->ofp_port, NULL, false,
11938578 4747 is_last_action, false);
feee58b9 4748 } else {
11938578
AZ
4749 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4750 false);
feee58b9
AZ
4751 }
4752}
4753
4754static void
4755flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4756{
4757 const struct xport *xport, *last = NULL;
9583bc14 4758
feee58b9 4759 /* Use 'last' to keep track of the last output port. */
46c88433
EJ
4760 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4761 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
4762 continue;
4763 }
4764
feee58b9
AZ
4765 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4766 /* 'last' is not the final output port; send the packet to it now
4767 * and remember the current port in 'last'. */
4768 flood_packet_to_port(ctx, last, all, false);
4769 last = xport;
9583bc14
EJ
4770 }
4771 }
4772
feee58b9
AZ
4773 /* Send the packet to the 'last' port. */
4774 flood_packet_to_port(ctx, last, all, is_last_action);
2031ef97 4775 ctx->nf_output_iface = NF_OUT_FLOOD;
9583bc14
EJ
4776}
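/* Worked example (illustrative, not part of the blamed source): flooding to
 * eligible ports p1, p2 and p3 emits output actions for p1 and p2 with
 * 'is_last_action' false and defers p3 to the final flood_packet_to_port()
 * call above, so optimizations tied to the last action apply only to the
 * truly final output. */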
4777
74c4530d
JP
4778static void
4779put_controller_user_action(struct xlate_ctx *ctx,
4780 bool dont_send, bool continuation,
4781 uint32_t recirc_id, int len,
4782 enum ofp_packet_in_reason reason,
4783 uint16_t controller_id)
4784{
4785 struct user_action_cookie cookie;
4786
4787 memset(&cookie, 0, sizeof cookie);
4788 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4789 cookie.ofp_in_port = OFPP_NONE,
4790 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4791 cookie.controller.dont_send = dont_send;
4792 cookie.controller.continuation = continuation;
4793 cookie.controller.reason = reason;
4794 cookie.controller.recirc_id = recirc_id;
4795 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4796 cookie.controller.controller_id = controller_id;
4797 cookie.controller.max_len = len;
4798
4799 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4800 ctx->xin->flow.in_port.ofp_port);
769b5034 4801 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
74c4530d
JP
4802 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4803 false, ctx->odp_actions);
4804}
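/* Worked example (illustrative; the exact field list is abbreviated): the
 * action composed above prints roughly as
 * "userspace(pid=<N>,controller(reason=1,dont_send=0,continuation=0,
 * recirc_id=<id>,rule_cookie=0x0,controller_id=0,max_len=128))"; the
 * handler thread decodes this cookie back into an OpenFlow PACKET_IN. */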
4805
9583bc14 4806static void
d39ec23d
JP
4807xlate_controller_action(struct xlate_ctx *ctx, int len,
4808 enum ofp_packet_in_reason reason,
4809 uint16_t controller_id,
206ddb9a 4810 uint32_t provider_meter_id,
d39ec23d 4811 const uint8_t *userdata, size_t userdata_len)
9583bc14 4812{
b476e2f2 4813 xlate_commit_actions(ctx);
beb75a40 4814
9bfe9334
BP
4815 /* A packet sent by an action in a table-miss rule is considered an
4816 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4817 * it will get translated back to OFPR_ACTION for those versions. */
4818 if (reason == OFPR_ACTION
07a3cd5c 4819 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
9bfe9334
BP
4820 reason = OFPR_EXPLICIT_MISS;
4821 }
4822
d39ec23d
JP
4823 struct frozen_state state = {
4824 .table_id = ctx->table_id,
4825 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4826 .stack = ctx->stack.data,
4827 .stack_size = ctx->stack.size,
4828 .mirrors = ctx->mirrors,
4829 .conntracked = ctx->conntracked,
aeb6566d 4830 .was_mpls = ctx->was_mpls,
d39ec23d
JP
4831 .ofpacts = NULL,
4832 .ofpacts_len = 0,
4833 .action_set = NULL,
4834 .action_set_len = 0,
4835 .userdata = CONST_CAST(uint8_t *, userdata),
4836 .userdata_len = userdata_len,
9bfe9334 4837 };
d39ec23d 4838 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
9583bc14 4839
d39ec23d
JP
4840 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4841 if (!recirc_id) {
4842 xlate_report_error(ctx, "Failed to allocate recirculation id");
4843 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4844 return;
4845 }
4846 recirc_refs_add(&ctx->xout->recircs, recirc_id);
df70a773 4847
206ddb9a
JP
4848 /* If the controller action didn't request a specific meter (a request
4849 * is indicated by a 'meter_id' argument other than NX_CTLR_NO_METER),
4850 * see if one was configured through the "controller" virtual meter.
4851 *
4852 * Internally, ovs-vswitchd uses UINT32_MAX to indicate that no meter
4853 * is configured. */
4854 uint32_t meter_id;
4855 if (provider_meter_id == UINT32_MAX) {
4856 meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4857 } else {
4858 meter_id = provider_meter_id;
4859 }
4860
d39ec23d
JP
4861 size_t offset;
4862 size_t ac_offset;
d39ec23d
JP
4863 if (meter_id != UINT32_MAX) {
4864 /* If controller meter is configured, generate clone(meter, userspace)
4865 * action. */
4866 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4867 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4868 UINT32_MAX);
4869 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4870 OVS_SAMPLE_ATTR_ACTIONS);
4871 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
df70a773 4872 }
3b4fff43 4873
d39ec23d
JP
4874 /* Generate the datapath flows even if we don't send the packet-in
4875 * so that debugging more closely represents normal state. */
74c4530d 4876 bool dont_send = false;
d39ec23d 4877 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
74c4530d 4878 dont_send = true;
d39ec23d 4879 }
74c4530d
JP
4880 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4881 reason, controller_id);
d39ec23d
JP
4882
4883 if (meter_id != UINT32_MAX) {
4884 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4885 nl_msg_end_nested(ctx->odp_actions, offset);
4886 }
9583bc14
EJ
4887}
4888
e6bc8e74
YHW
4889/* Creates a frozen state and allocates a unique recirc id for the given
4890 * state. Returns a non-zero recirc id if it is allocated successfully.
4891 * Returns 0 otherwise.
4892 */
4893static uint32_t
77ab5fd2
BP
4894finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4895{
1d361a81 4896 ovs_assert(ctx->freezing);
7bbdd84f 4897
1d361a81 4898 struct frozen_state state = {
07659514 4899 .table_id = table,
07a3cd5c 4900 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5c1b2314 4901 .stack = ctx->stack.data,
84cf3c1f 4902 .stack_size = ctx->stack.size,
29bae541 4903 .mirrors = ctx->mirrors,
07659514 4904 .conntracked = ctx->conntracked,
aeb6566d 4905 .was_mpls = ctx->was_mpls,
00135b86 4906 .xport_uuid = ctx->xin->xport_uuid,
1d361a81
BP
4907 .ofpacts = ctx->frozen_actions.data,
4908 .ofpacts_len = ctx->frozen_actions.size,
417509fa 4909 .action_set = ctx->action_set.data,
8a5fb3b4 4910 .action_set_len = ctx->action_set.size,
74c4530d
JP
4911 .userdata = ctx->pause ? CONST_CAST(uint8_t *,ctx->pause->userdata)
4912 : NULL,
4913 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
2082425c 4914 };
77ab5fd2 4915 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
2082425c 4916
74c4530d
JP
4917 /* Allocate a unique recirc id for the given metadata state in the
4918 * flow. An existing id, with a new reference to the corresponding
4919 * recirculation context, will be returned if possible.
4920 * The life-cycle of this recirc id is managed by associating it
4921 * with the udpif key ('ukey') created for each new datapath flow. */
4922 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4923 if (!recirc_id) {
4924 xlate_report_error(ctx, "Failed to allocate recirculation id");
4925 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4926 return 0;
4927 }
4928 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4929
77ab5fd2 4930 if (ctx->pause) {
74c4530d 4931 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
e6bc8e74 4932 return 0;
77ab5fd2 4933 }
7bbdd84f 4934
74c4530d
JP
4935 put_controller_user_action(ctx, false, true, recirc_id,
4936 ctx->pause->max_len,
4937 ctx->pause->reason,
4938 ctx->pause->controller_id);
4939 } else {
53cc166a
JR
4940 if (ctx->recirc_update_dp_hash) {
4941 struct ovs_action_hash *act_hash;
4942
4943 /* Hash action. */
4944 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4945 OVS_ACTION_ATTR_HASH,
4946 sizeof *act_hash);
2e3fd24c
JS
4947 act_hash->hash_alg = ctx->dp_hash_alg;
4948 act_hash->hash_basis = ctx->dp_hash_basis;
53cc166a 4949 }
74c4530d 4950 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
77ab5fd2 4951 }
e672ff9b 4952
1d361a81
BP
4953 /* Undo changes done by freezing. */
4954 ctx_cancel_freeze(ctx);
74c4530d 4955 return recirc_id;
7bbdd84f
SH
4956}
4957
1d361a81 4958/* Called only when we're freezing. */
07659514 4959static void
77ab5fd2 4960finish_freezing(struct xlate_ctx *ctx)
07659514
JS
4961{
4962 xlate_commit_actions(ctx);
77ab5fd2 4963 finish_freezing__(ctx, 0);
07659514
JS
4964}
4965
e37b8437
JS
4966/* Fork the pipeline here. The current packet will continue processing the
4967 * current action list. A clone of the current packet will recirculate, skip
4968 * the remainder of the current action list and asynchronously resume pipeline
4969 * processing in 'table' with the current metadata and action set. */
4970static void
5fdd80cc
YHW
4971compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
4972 const uint16_t zone)
e37b8437 4973{
e6bc8e74 4974 uint32_t recirc_id;
1d361a81 4975 ctx->freezing = true;
e6bc8e74
YHW
4976 recirc_id = finish_freezing__(ctx, table);
4977
4978 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4979 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4980 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
5fdd80cc 4981 ctx->xin->packet, recirc_id, zone)) {
e6bc8e74
YHW
4982 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4983 "recirculate. The forked pipeline will be resumed at "
4984 "table %u.", table);
4985 } else {
4986 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4987 "forked pipeline with recirc_id = %d.", recirc_id);
4988 }
4989 }
e37b8437
JS
4990}
4991
8bfd0fda
BP
4992static void
4993compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
9583bc14 4994{
33bf9176 4995 struct flow *flow = &ctx->xin->flow;
8bfd0fda 4996 int n;
33bf9176 4997
8bfd0fda 4998 ovs_assert(eth_type_mpls(mpls->ethertype));
b0a17866 4999
49a73e0c 5000 n = flow_count_mpls_labels(flow, ctx->wc);
8bfd0fda 5001 if (!n) {
704bb0bf 5002 xlate_commit_actions(ctx);
8bfd0fda
BP
5003 } else if (n >= FLOW_MAX_MPLS_LABELS) {
5004 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
5005 xlate_report_error(ctx, "dropping packet on which an MPLS push "
5006 "action can't be performed as it would have "
5007 "more MPLS LSEs than the %d supported.",
5008 FLOW_MAX_MPLS_LABELS);
9583bc14 5009 }
fff1b9c0 5010 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
8bfd0fda 5011 return;
9583bc14 5012 }
b0a17866 5013
742c0ac3
JR
5014 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
5015 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
9583bc14
EJ
5016}
5017
8bfd0fda 5018static void
9cfef3d0 5019compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 5020{
8bfd0fda 5021 struct flow *flow = &ctx->xin->flow;
49a73e0c 5022 int n = flow_count_mpls_labels(flow, ctx->wc);
33bf9176 5023
49a73e0c 5024 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
8bf009bf 5025 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
e12ec36b 5026 ctx->was_mpls = true;
7bbdd84f
SH
5027 }
5028 } else if (n >= FLOW_MAX_MPLS_LABELS) {
8bfd0fda 5029 if (ctx->xin->packet != NULL) {
2d9b49dd
BP
5030 xlate_report_error(ctx, "dropping packet on which an "
5031 "MPLS pop action can't be performed as it has "
5032 "more MPLS LSEs than the %d supported.",
5033 FLOW_MAX_MPLS_LABELS);
8bfd0fda 5034 }
fff1b9c0 5035 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
1520ef4f 5036 ofpbuf_clear(ctx->odp_actions);
9583bc14
EJ
5037 }
5038}
5039
5040static bool
5041compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
5042{
33bf9176
BP
5043 struct flow *flow = &ctx->xin->flow;
5044
5045 if (!is_ip_any(flow)) {
9583bc14
EJ
5046 return false;
5047 }
5048
49a73e0c 5049 ctx->wc->masks.nw_ttl = 0xff;
33bf9176
BP
5050 if (flow->nw_ttl > 1) {
5051 flow->nw_ttl--;
9583bc14
EJ
5052 return false;
5053 } else {
5054 size_t i;
5055
5056 for (i = 0; i < ids->n_controllers; i++) {
d39ec23d 5057 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
206ddb9a 5058 ids->cnt_ids[i], UINT32_MAX, NULL, 0);
9583bc14
EJ
5059 }
5060
5061 /* Stop processing for current table. */
2d9b49dd
BP
5062 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
5063 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
9583bc14
EJ
5064 return true;
5065 }
5066}
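/* Worked example (illustrative, not part of the blamed source):
 * "dec_ttl(1,3)" on a packet with nw_ttl == 1 sends an OFPR_INVALID_TTL
 * packet-in to controller ids 1 and 3 and stops processing the current
 * table; with nw_ttl > 1 it just decrements the TTL and lets translation
 * continue. */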
5067
8bfd0fda 5068static void
097d4939
JR
5069compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
5070{
8bfd0fda 5071 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 5072 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
8bfd0fda 5073 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
097d4939 5074 }
097d4939
JR
5075}
5076
8bfd0fda 5077static void
097d4939
JR
5078compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
5079{
8bfd0fda 5080 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 5081 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
8bfd0fda 5082 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
097d4939 5083 }
097d4939
JR
5084}
5085
491e05c2
YY
5086static bool
5087compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
5088{
5089 struct flow *flow = &ctx->xin->flow;
5090
5091 if ((flow->packet_type == htonl(PT_NSH)) ||
5092 (flow->dl_type == htons(ETH_TYPE_NSH))) {
5093 ctx->wc->masks.nsh.ttl = 0xff;
5094 if (flow->nsh.ttl > 1) {
5095 flow->nsh.ttl--;
5096 return false;
5097 } else {
5098 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
206ddb9a 5099 0, UINT32_MAX, NULL, 0);
491e05c2
YY
5100 }
5101 }
5102
5103 /* Stop processing for current table. */
5104 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
5105 return true;
5106}
5107
8bfd0fda 5108static void
9cfef3d0 5109compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
9583bc14 5110{
8bfd0fda 5111 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
49a73e0c 5112 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda 5113 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
b0a17866 5114 }
9583bc14
EJ
5115}
5116
5117static bool
9cfef3d0 5118compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
9583bc14 5119{
33bf9176 5120 struct flow *flow = &ctx->xin->flow;
1dd35f8a 5121
8bfd0fda 5122 if (eth_type_mpls(flow->dl_type)) {
22d38fca
JR
5123 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
5124
49a73e0c 5125 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
8bfd0fda
BP
5126 if (ttl > 1) {
5127 ttl--;
5128 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
5129 return false;
5130 } else {
d39ec23d 5131 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
206ddb9a 5132 UINT32_MAX, NULL, 0);
8bfd0fda 5133 }
9583bc14 5134 }
22d38fca
JR
5135
5136 /* Stop processing for current table. */
2d9b49dd 5137 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
22d38fca 5138 return true;
9583bc14
EJ
5139}
5140
8b496c72
BP
5141/* Emits an action that outputs to 'port', within 'ctx'.
5142 *
5143 * 'controller_len' affects only packets sent to an OpenFlow controller. It
5144 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
5145 * send the whole packet (and 0 means to omit the packet entirely).
5146 *
5147 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
5148 * controller. If it is false, then the packet is never sent to the OpenFlow
5149 * controller.
5150 *
5151 * 'is_last_action' should be true if this output is the last OpenFlow action
5152 * to be processed, which enables certain optimizations.
5153 *
5154 * 'truncate' should be true if the packet to be output is being truncated,
5155 * which suppresses certain optimizations. */
9583bc14 5156static void
8b496c72
BP
5157xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
5158 uint16_t controller_len, bool may_packet_in,
f5634764
KG
5159 bool is_last_action, bool truncate,
5160 bool group_bucket_action)
9583bc14 5161{
2031ef97 5162 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
9583bc14 5163
2031ef97 5164 ctx->nf_output_iface = NF_OUT_DROP;
9583bc14
EJ
5165
5166 switch (port) {
5167 case OFPP_IN_PORT:
feee58b9 5168 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
8bbbda3c 5169 is_last_action, truncate);
9583bc14
EJ
5170 break;
5171 case OFPP_TABLE:
4e022ec0 5172 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
11938578 5173 0, may_packet_in, true, false, false,
96c3a6e5 5174 do_xlate_actions);
9583bc14
EJ
5175 break;
5176 case OFPP_NORMAL:
5177 xlate_normal(ctx);
5178 break;
5179 case OFPP_FLOOD:
feee58b9 5180 flood_packets(ctx, false, is_last_action);
9583bc14
EJ
5181 break;
5182 case OFPP_ALL:
feee58b9 5183 flood_packets(ctx, true, is_last_action);
9583bc14
EJ
5184 break;
5185 case OFPP_CONTROLLER:
d39ec23d
JP
5186 xlate_controller_action(ctx, controller_len,
5187 (ctx->in_packet_out ? OFPR_PACKET_OUT
f5634764 5188 : group_bucket_action ? OFPR_GROUP
d39ec23d
JP
5189 : ctx->in_action_set ? OFPR_ACTION_SET
5190 : OFPR_ACTION),
206ddb9a 5191 0, UINT32_MAX, NULL, 0);
9583bc14
EJ
5192 break;
5193 case OFPP_NONE:
5194 break;
5195 case OFPP_LOCAL:
5196 default:
4e022ec0 5197 if (port != ctx->xin->flow.in_port.ofp_port) {
8bbbda3c 5198 compose_output_action(ctx, port, NULL, is_last_action, truncate);
9583bc14 5199 } else {
f89547e2 5200 xlate_report_info(ctx, "skipping output to input port");
9583bc14
EJ
5201 }
5202 break;
5203 }
5204
5205 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2031ef97
BP
5206 ctx->nf_output_iface = NF_OUT_FLOOD;
5207 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
5208 ctx->nf_output_iface = prev_nf_output_iface;
9583bc14 5209 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2031ef97
BP
5210 ctx->nf_output_iface != NF_OUT_FLOOD) {
5211 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
5212 }
5213}
5214
5215static void
5216xlate_output_reg_action(struct xlate_ctx *ctx,
feee58b9 5217 const struct ofpact_output_reg *or,
f5634764
KG
5218 bool is_last_action,
5219 bool group_bucket_action)
9583bc14
EJ
5220{
5221 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
5222 if (port <= UINT16_MAX) {
2d9b49dd
BP
5223 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
5224
9583bc14
EJ
5225 union mf_subvalue value;
5226
5227 memset(&value, 0xff, sizeof value);
49a73e0c 5228 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
8b496c72 5229 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
f5634764
KG
5230 false, is_last_action, false,
5231 group_bucket_action);
2d9b49dd
BP
5232 } else {
5233 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
5234 port);
9583bc14
EJ
5235 }
5236}
5237
aaca4fe0
WT
5238static void
5239xlate_output_trunc_action(struct xlate_ctx *ctx,
feee58b9 5240 ofp_port_t port, uint32_t max_len,
f5634764
KG
5241 bool is_last_action,
5242 bool group_bucket_action)
aaca4fe0
WT
5243{
5244 bool support_trunc = ctx->xbridge->support.trunc;
5245 struct ovs_action_trunc *trunc;
29718ad4 5246 char name[OFP_MAX_PORT_NAME_LEN];
aaca4fe0
WT
5247
5248 switch (port) {
5249 case OFPP_TABLE:
5250 case OFPP_NORMAL:
5251 case OFPP_FLOOD:
5252 case OFPP_ALL:
5253 case OFPP_CONTROLLER:
5254 case OFPP_NONE:
50f96b10 5255 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
5256 xlate_report(ctx, OFT_WARN,
5257 "output_trunc does not support port: %s", name);
aaca4fe0
WT
5258 break;
5259 case OFPP_LOCAL:
5260 case OFPP_IN_PORT:
5261 default:
5262 if (port != ctx->xin->flow.in_port.ofp_port) {
5263 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
5264
5265 if (xport == NULL || xport->odp_port == ODPP_NONE) {
5266 /* Since truncation happens at the output action that follows, the
5267 * behavior is somewhat unpredictable if the output port is a
49f17344 5268 * patch port. For simplicity, disallow this case. */
50f96b10 5269 ofputil_port_to_string(port, NULL, name, sizeof name);
2d9b49dd
BP
5270 xlate_report_error(ctx, "output_trunc does not support "
5271 "patch port %s", name);
aaca4fe0
WT
5272 break;
5273 }
5274
5275 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
5276 OVS_ACTION_ATTR_TRUNC,
5277 sizeof *trunc);
5278 trunc->max_len = max_len;
f5634764
KG
5279 xlate_output_action(ctx, port, 0, false, is_last_action, true,
5280 group_bucket_action);
aaca4fe0
WT
5281 if (!support_trunc) {
5282 ctx->xout->slow |= SLOW_ACTION;
5283 }
5284 } else {
f89547e2 5285 xlate_report_info(ctx, "skipping output to input port");
aaca4fe0
WT
5286 }
5287 break;
5288 }
5289}
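/* Worked example (illustrative, not part of the blamed source):
 * "output(port=2,max_len=100)" composes the datapath actions
 * "trunc(100),2", i.e. truncation applies to the output that immediately
 * follows; when the datapath lacks native truncate support, SLOW_ACTION
 * forces the packet through userspace instead. */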
5290
9583bc14
EJ
5291static void
5292xlate_enqueue_action(struct xlate_ctx *ctx,
feee58b9 5293 const struct ofpact_enqueue *enqueue,
f5634764
KG
5294 bool is_last_action,
5295 bool group_bucket_action)
9583bc14 5296{
4e022ec0 5297 ofp_port_t ofp_port = enqueue->port;
9583bc14
EJ
5298 uint32_t queue_id = enqueue->queue;
5299 uint32_t flow_priority, priority;
5300 int error;
5301
5302 /* Translate queue to priority. */
89a8a7f0 5303 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
9583bc14
EJ
5304 if (error) {
5305 /* Fall back to ordinary output action. */
8b496c72 5306 xlate_output_action(ctx, enqueue->port, 0, false,
f5634764
KG
5307 is_last_action, false,
5308 group_bucket_action);
9583bc14
EJ
5309 return;
5310 }
5311
5312 /* Check output port. */
5313 if (ofp_port == OFPP_IN_PORT) {
4e022ec0
AW
5314 ofp_port = ctx->xin->flow.in_port.ofp_port;
5315 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
5316 return;
5317 }
5318
5319 /* Add datapath actions. */
5320 flow_priority = ctx->xin->flow.skb_priority;
5321 ctx->xin->flow.skb_priority = priority;
11938578 5322 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
9583bc14
EJ
5323 ctx->xin->flow.skb_priority = flow_priority;
5324
5325 /* Update NetFlow output port. */
2031ef97
BP
5326 if (ctx->nf_output_iface == NF_OUT_DROP) {
5327 ctx->nf_output_iface = ofp_port;
5328 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
5329 ctx->nf_output_iface = NF_OUT_MULTI;
9583bc14
EJ
5330 }
5331}
5332
5333static void
5334xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
5335{
5336 uint32_t skb_priority;
5337
89a8a7f0 5338 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
9583bc14
EJ
5339 ctx->xin->flow.skb_priority = skb_priority;
5340 } else {
5341 /* Couldn't translate queue to a priority. Nothing to do. A warning
5342 * has already been logged. */
5343 }
5344}
5345
5346static bool
46c88433 5347slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
9583bc14 5348{
46c88433
EJ
5349 const struct xbridge *xbridge = xbridge_;
5350 struct xport *port;
9583bc14
EJ
5351
5352 switch (ofp_port) {
5353 case OFPP_IN_PORT:
5354 case OFPP_TABLE:
5355 case OFPP_NORMAL:
5356 case OFPP_FLOOD:
5357 case OFPP_ALL:
5358 case OFPP_NONE:
5359 return true;
5360 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5361 return false;
5362 default:
46c88433 5363 port = get_ofp_port(xbridge, ofp_port);
9583bc14
EJ
5364 return port ? port->may_enable : false;
5365 }
5366}
5367
5368static void
5369xlate_bundle_action(struct xlate_ctx *ctx,
feee58b9 5370 const struct ofpact_bundle *bundle,
f5634764
KG
5371 bool is_last_action,
5372 bool group_bucket_action)
9583bc14 5373{
4e022ec0 5374 ofp_port_t port;
9583bc14 5375
49a73e0c 5376 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
46c88433 5377 CONST_CAST(struct xbridge *, ctx->xbridge));
9583bc14 5378 if (bundle->dst.field) {
49a73e0c 5379 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
2d9b49dd 5380 xlate_report_subfield(ctx, &bundle->dst);
9583bc14 5381 } else {
f5634764
KG
5382 xlate_output_action(ctx, port, 0, false, is_last_action, false,
5383 group_bucket_action);
9583bc14
EJ
5384 }
5385}
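/* Worked example (illustrative, not part of the blamed source):
 * "bundle(eth_src,0,active_backup,ofport,slaves:4,8)" outputs directly to
 * the chosen live member, while
 * "bundle_load(eth_src,0,hrw,ofport,NXM_NX_REG0[],slaves:4,8)" takes the
 * 'bundle->dst.field' branch above and only stores the chosen ofport in
 * reg0 for later use. */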
5386
4165b5e0
JS
5387static void
5388xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5389{
49a73e0c 5390 learn_mask(learn, ctx->wc);
9583bc14 5391
df70a773 5392 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4165b5e0
JS
5393 uint64_t ofpacts_stub[1024 / 8];
5394 struct ofputil_flow_mod fm;
2c7ee524 5395 struct ofproto_flow_mod ofm__, *ofm;
4165b5e0 5396 struct ofpbuf ofpacts;
2c7ee524
JR
5397 enum ofperr error;
5398
5399 if (ctx->xin->xcache) {
3f3b97b0 5400 ofm = xmalloc(sizeof *ofm);
2c7ee524
JR
5401 } else {
5402 ofm = &ofm__;
5403 }
4165b5e0
JS
5404
5405 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
2c7ee524 5406 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
2d9b49dd
BP
5407 if (OVS_UNLIKELY(ctx->xin->trace)) {
5408 struct ds s = DS_EMPTY_INITIALIZER;
5409 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
6a6b7060
BP
5410 minimatch_format(&fm.match,
5411 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
5412 NULL, &s, OFP_DEFAULT_PRIORITY);
2d9b49dd
BP
5413 ds_chomp(&s, ' ');
5414 ds_put_format(&s, " priority=%d", fm.priority);
5415 if (fm.new_cookie) {
5416 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5417 }
5418 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5419 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5420 }
5421 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5422 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5423 }
5424 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5425 ds_put_cstr(&s, " send_flow_rem");
5426 }
5427 ds_put_cstr(&s, " actions=");
efefbcae
BP
5428 struct ofpact_format_params fp = { .s = &s };
5429 ofpacts_format(fm.ofpacts, fm.ofpacts_len, &fp);
2d9b49dd
BP
5430 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5431 ds_destroy(&s);
5432 }
2c7ee524
JR
5433 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5434 &fm, ofm);
4165b5e0 5435 ofpbuf_uninit(&ofpacts);
2c7ee524 5436
3f3b97b0 5437 if (!error) {
4c71600d 5438 bool success = true;
3f3b97b0 5439 if (ctx->xin->allow_side_effects) {
4c71600d
DDP
5440 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5441 learn->limit, &success);
5442 } else if (learn->limit) {
5443 if (!ofm->temp_rule
5444 || ofm->temp_rule->state != RULE_INSERTED) {
5445 /* The learned rule expired and there are no packets, so
5446 * we cannot learn again. Since the translated actions
5447 * depend on the result of learning, we tell the caller
5448 * that there's no point in caching this result. */
5449 ctx->xout->avoid_caching = true;
5450 }
3f3b97b0
DDP
5451 }
5452
4c71600d
DDP
5453 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5454 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5455 &ctx->xin->flow, ctx->wc);
5456 xlate_report_subfield(ctx, &learn->result_dst);
5457 }
5458
5459 if (success && ctx->xin->xcache) {
3f3b97b0
DDP
5460 struct xc_entry *entry;
5461
5462 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5463 entry->learn.ofm = ofm;
4c71600d 5464 entry->learn.limit = learn->limit;
3f3b97b0 5465 ofm = NULL;
1bddcb5d
YS
5466 } else {
5467 ofproto_flow_mod_uninit(ofm);
3f3b97b0 5468 }
4c71600d
DDP
5469
5470 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5471 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5472 }
3f3b97b0
DDP
5473 }
5474
3f76c123 5475 if (ofm != &ofm__) {
3f3b97b0 5476 free(ofm);
2c7ee524
JR
5477 }
5478
5479 if (error) {
2d9b49dd
BP
5480 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5481 ofperr_to_string(error));
2c7ee524 5482 }
6a6b7060
BP
5483
5484 minimatch_destroy(&fm.match);
2d9b49dd
BP
5485 } else {
5486 xlate_report(ctx, OFT_WARN,
5487 "suppressing side effects, so learn action ignored");
b256dc52
JS
5488 }
5489}
5490
5491static void
5492xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5493 uint16_t idle_timeout, uint16_t hard_timeout)
5494{
5495 if (tcp_flags & (TCP_FIN | TCP_RST)) {
07a3cd5c 5496 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
b256dc52 5497 }
9583bc14
EJ
5498}
5499
9583bc14
EJ
5500static void
5501xlate_fin_timeout(struct xlate_ctx *ctx,
5502 const struct ofpact_fin_timeout *oft)
5503{
b256dc52 5504 if (ctx->rule) {
df70a773
JR
5505 if (ctx->xin->allow_side_effects) {
5506 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5507 oft->fin_idle_timeout, oft->fin_hard_timeout);
5508 }
b256dc52
JS
5509 if (ctx->xin->xcache) {
5510 struct xc_entry *entry;
5511
5512 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
83709dfa
JR
5513 /* XC_RULE already holds a reference on the rule, none is taken
5514 * here. */
901a517e
JR
5515 entry->fin.rule = ctx->rule;
5516 entry->fin.idle = oft->fin_idle_timeout;
5517 entry->fin.hard = oft->fin_hard_timeout;
b256dc52 5518 }
9583bc14
EJ
5519 }
5520}
5521
5522static void
5523xlate_sample_action(struct xlate_ctx *ctx,
5524 const struct ofpact_sample *os)
5525{
f69f713b
BY
5526 odp_port_t output_odp_port = ODPP_NONE;
5527 odp_port_t tunnel_out_port = ODPP_NONE;
5528 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5529 bool emit_set_tunnel = false;
5530
5531 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5532 return;
5533 }
5534
e824d78d
JR
5535 /* Scale the probability from 16-bit to 32-bit while representing
5536 * the same percentage. */
5537 uint32_t probability = (os->probability << 16) | os->probability;
5538
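    /* Worked example (illustrative, not part of the blamed source):
     * duplicating the 16-bit value multiplies it by 0x10001, i.e.
     * p * (UINT32_MAX / UINT16_MAX), so 0xffff (100%) scales to
     * 0xffffffff and 0x8000 (50%) scales to 0x80008000, preserving the
     * sampled fraction. */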
f69f713b
BY
5539 /* If the sampling port in the flow sample action is equal to the input
5540 * ofp_port, this sample action is an input port action. */
5541 if (os->sampling_port != OFPP_NONE &&
5542 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5543 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5544 os->sampling_port);
5545 if (output_odp_port == ODPP_NONE) {
2d9b49dd
BP
5546 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5547 "action", os->sampling_port);
f69f713b
BY
5548 return;
5549 }
5550
5551 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5552 os->collector_set_id)
cd32509e 5553 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
f69f713b
BY
5554 tunnel_out_port = output_odp_port;
5555 emit_set_tunnel = true;
5556 }
5557 }
5558
5559 xlate_commit_actions(ctx);
5560 /* If 'emit_set_tunnel' is set, sample(sampling_port=1) translates into
5561 * the datapath actions set(tunnel(...)), sample(...), which are used
5562 * for sampling egress tunnel information. */
5563 if (emit_set_tunnel) {
5564 const struct xport *xport = get_ofp_port(ctx->xbridge,
5565 os->sampling_port);
5566
5567 if (xport && xport->is_tunnel) {
5568 struct flow *flow = &ctx->xin->flow;
5569 tnl_port_send(xport->ofport, flow, ctx->wc);
5570 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5571 struct flow_tnl flow_tnl = flow->tunnel;
c6d87201 5572 const char *tnl_type;
f69f713b 5573
c6d87201 5574 tnl_type = tnl_port_get_type(xport->ofport);
f69f713b 5575 commit_odp_tunnel_action(flow, &ctx->base_flow,
c6d87201 5576 ctx->odp_actions, tnl_type);
f69f713b
BY
5577 flow->tunnel = flow_tnl;
5578 }
5579 } else {
2d9b49dd
BP
5580 xlate_report_error(ctx,
5581 "sampling_port:%d should be a tunnel port.",
5582 os->sampling_port);
f69f713b
BY
5583 }
5584 }
e824d78d 5585
24a4bbe1
IM
5586 struct user_action_cookie cookie;
5587
5588 memset(&cookie, 0, sizeof cookie);
5589 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
5590 cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
5591 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
5592 cookie.flow_sample.probability = os->probability;
5593 cookie.flow_sample.collector_set_id = os->collector_set_id;
5594 cookie.flow_sample.obs_domain_id = os->obs_domain_id;
5595 cookie.flow_sample.obs_point_id = os->obs_point_id;
5596 cookie.flow_sample.output_odp_port = output_odp_port;
5597 cookie.flow_sample.direction = os->direction;
5598
8de6ff3e 5599 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
9583bc14
EJ
5600}
5601
eee69393
AZ
5602/* Determine whether a datapath action translated from an OpenFlow action
5603 * can be reversed by another datapath action.
5604 *
5605 * OpenFlow actions that do not emit datapath actions are trivially
5606 * reversible. Reversibility of other actions depends on the nature of
5607 * the action and its translation. */
5608static bool
5609reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
bef503e8 5610{
eee69393 5611 const struct ofpact *a;
bef503e8 5612
eee69393
AZ
5613 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5614 switch (a->type) {
5615 case OFPACT_BUNDLE:
5616 case OFPACT_CLEAR_ACTIONS:
5617 case OFPACT_CLONE:
5618 case OFPACT_CONJUNCTION:
5619 case OFPACT_CONTROLLER:
5620 case OFPACT_CT_CLEAR:
5621 case OFPACT_DEBUG_RECIRC:
a934a3dd 5622 case OFPACT_DEBUG_SLOW:
eee69393
AZ
5623 case OFPACT_DEC_MPLS_TTL:
5624 case OFPACT_DEC_TTL:
5625 case OFPACT_ENQUEUE:
5626 case OFPACT_EXIT:
5627 case OFPACT_FIN_TIMEOUT:
5628 case OFPACT_GOTO_TABLE:
5629 case OFPACT_GROUP:
5630 case OFPACT_LEARN:
5631 case OFPACT_MULTIPATH:
5632 case OFPACT_NOTE:
5633 case OFPACT_OUTPUT:
5634 case OFPACT_OUTPUT_REG:
5635 case OFPACT_POP_MPLS:
5636 case OFPACT_POP_QUEUE:
5637 case OFPACT_PUSH_MPLS:
5638 case OFPACT_PUSH_VLAN:
5639 case OFPACT_REG_MOVE:
5640 case OFPACT_RESUBMIT:
5641 case OFPACT_SAMPLE:
5642 case OFPACT_SET_ETH_DST:
5643 case OFPACT_SET_ETH_SRC:
5644 case OFPACT_SET_FIELD:
5645 case OFPACT_SET_IP_DSCP:
5646 case OFPACT_SET_IP_ECN:
5647 case OFPACT_SET_IP_TTL:
5648 case OFPACT_SET_IPV4_DST:
5649 case OFPACT_SET_IPV4_SRC:
5650 case OFPACT_SET_L4_DST_PORT:
5651 case OFPACT_SET_L4_SRC_PORT:
5652 case OFPACT_SET_MPLS_LABEL:
5653 case OFPACT_SET_MPLS_TC:
5654 case OFPACT_SET_MPLS_TTL:
5655 case OFPACT_SET_QUEUE:
5656 case OFPACT_SET_TUNNEL:
5657 case OFPACT_SET_VLAN_PCP:
5658 case OFPACT_SET_VLAN_VID:
5659 case OFPACT_STACK_POP:
5660 case OFPACT_STACK_PUSH:
5661 case OFPACT_STRIP_VLAN:
5662 case OFPACT_UNROLL_XLATE:
5663 case OFPACT_WRITE_ACTIONS:
5664 case OFPACT_WRITE_METADATA:
5b34f8fc 5665 case OFPACT_CHECK_PKT_LARGER:
eee69393
AZ
5666 break;
5667
5668 case OFPACT_CT:
5669 case OFPACT_METER:
5670 case OFPACT_NAT:
5671 case OFPACT_OUTPUT_TRUNC:
2142be1f
BP
5672 case OFPACT_ENCAP:
5673 case OFPACT_DECAP:
491e05c2 5674 case OFPACT_DEC_NSH_TTL:
eee69393 5675 return false;
9c2a44dc 5676 }
456024cb 5677 }
eee69393 5678 return true;
bef503e8
AZ
5679}
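/* Worked example (illustrative, not part of the blamed source): a clone
 * body of "set_field:10.1.1.1->ip_src,output:2" is reversible -- the
 * translator can restore the saved flow afterwards, so no datapath clone
 * is needed.  A body containing "ct(commit)" or "meter:1" has datapath
 * side effects that cannot be undone that way, so clone_xlate_actions()
 * below must emit a real clone (or nested sample) action. */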
5680
5681static void
c9f0a445 5682clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
f5634764
KG
5683 struct xlate_ctx *ctx, bool is_last_action,
5684 bool group_bucket_action OVS_UNUSED)
7ae62a67 5685{
b827b231
BP
5686 struct ofpbuf old_stack = ctx->stack;
5687 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5688 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5689 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5690
5691 struct ofpbuf old_action_set = ctx->action_set;
5692 uint64_t actset_stub[1024 / 8];
5693 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5694 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5695
eee69393 5696 size_t offset, ac_offset;
eee69393
AZ
5697 struct flow old_flow = ctx->xin->flow;
5698
feee58b9 5699 if (reversible_actions(actions, actions_len) || is_last_action) {
eee69393 5700 old_flow = ctx->xin->flow;
f5634764 5701 do_xlate_actions(actions, actions_len, ctx, is_last_action, false);
118b21d9
EG
5702 if (!ctx->freezing) {
5703 xlate_action_set(ctx);
5704 }
60eebf12
AZ
5705 if (ctx->freezing) {
5706 finish_freezing(ctx);
5707 }
eee69393
AZ
5708 goto xlate_done;
5709 }
5710
5711 /* Commit datapath actions before emitting the clone action to
5712 * avoid emitting those actions twice: once inside
5713 * the clone, another time for the actions after the clone. */
5714 xlate_commit_actions(ctx);
9c2a44dc 5715 struct flow old_base = ctx->base_flow;
eee69393
AZ
5716 bool old_was_mpls = ctx->was_mpls;
5717 bool old_conntracked = ctx->conntracked;
ba653d2a 5718
eee69393
AZ
5719 /* The actions are not reversible, so a datapath clone action is
5720 * required to encode the translation. Select the clone action
5721 * based on datapath capabilities. */
5722 if (ctx->xbridge->support.clone) { /* Use clone action */
5723 /* Use clone action as datapath clone. */
5724 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
f5634764 5725 do_xlate_actions(actions, actions_len, ctx, true, false);
118b21d9
EG
5726 if (!ctx->freezing) {
5727 xlate_action_set(ctx);
5728 }
60eebf12
AZ
5729 if (ctx->freezing) {
5730 finish_freezing(ctx);
5731 }
eee69393
AZ
5732 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5733 goto dp_clone_done;
5734 }
b827b231 5735
eee69393
AZ
5736 if (ctx->xbridge->support.sample_nesting > 3) {
5737 /* Use sample action as datapath clone. */
5738 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5739 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5740 OVS_SAMPLE_ATTR_ACTIONS);
f5634764 5741 do_xlate_actions(actions, actions_len, ctx, true, false);
118b21d9
EG
5742 if (!ctx->freezing) {
5743 xlate_action_set(ctx);
5744 }
60eebf12
AZ
5745 if (ctx->freezing) {
5746 finish_freezing(ctx);
5747 }
eee69393
AZ
5748 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5749 nl_msg_cancel_nested(ctx->odp_actions, offset);
5750 } else {
5751 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5752 UINT32_MAX); /* 100% probability. */
5753 nl_msg_end_nested(ctx->odp_actions, offset);
5754 }
5755 goto dp_clone_done;
5756 }
5757
5758 /* The datapath supports neither clone nor sufficiently nested sample
5759 * actions; skip translating the actions and report an error. */
5760 xlate_report_error(ctx, "Failed to compose clone action");
ba653d2a 5761
eee69393 5762dp_clone_done:
ba653d2a
BP
5763 /* The clone's conntrack execution should have no effect on the original
5764 * packet. */
5765 ctx->conntracked = old_conntracked;
bd3c2df3
BP
5766
5767 /* Popping MPLS from the clone should have no effect on the original
5768 * packet. */
5769 ctx->was_mpls = old_was_mpls;
eee69393
AZ
5770
5771 /* Restore the 'base_flow' for the next action. */
5772 ctx->base_flow = old_base;
5773
5774xlate_done:
5775 ofpbuf_uninit(&ctx->action_set);
5776 ctx->action_set = old_action_set;
5777 ofpbuf_uninit(&ctx->stack);
5778 ctx->stack = old_stack;
5779 ctx->xin->flow = old_flow;
7ae62a67
WT
5780}
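/* Worked example (illustrative, not part of the blamed source): on a
 * datapath that lacks OVS_ACTION_ATTR_CLONE but supports deep enough
 * sample nesting, the fallback above encodes clone(A) as
 * "sample(sample=100.0%,actions(A))", which has the same semantics: the
 * nested actions run on a copy and leave the outer packet unchanged. */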
5781
c9f0a445 5782static void
feee58b9
AZ
5783compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5784 bool is_last_action)
c9f0a445
AZ
5785{
5786 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5787
f5634764
KG
5788 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action,
5789 false);
c9f0a445
AZ
5790}
5791
076caa2f
JR
5792static void
5793xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5794{
5795 if (meter->provider_meter_id != UINT32_MAX) {
5796 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5797 meter->provider_meter_id);
5798 }
5799}
5800
9583bc14 5801static bool
46c88433 5802may_receive(const struct xport *xport, struct xlate_ctx *ctx)
9583bc14 5803{
bbbca389 5804 if (xport->config & (is_stp(&ctx->xin->flow)
46c88433
EJ
5805 ? OFPUTIL_PC_NO_RECV_STP
5806 : OFPUTIL_PC_NO_RECV)) {
9583bc14
EJ
5807 return false;
5808 }
5809
5810 /* Only drop packets here if both forwarding and learning are
5811 * disabled. If just learning is enabled, we need to let
5812 * OFPP_NORMAL and the learning action have a look at the packet
5813 * before we can drop it. */
9efd308e
DV
5814 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5815 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
9583bc14
EJ
5816 return false;
5817 }
5818
5819 return true;
5820}
5821
7fdb60a7 5822static void
7e7e8dbb
BP
5823xlate_write_actions__(struct xlate_ctx *ctx,
5824 const struct ofpact *ofpacts, size_t ofpacts_len)
7fdb60a7 5825{
c61f3870
BP
5826 /* Maintain actset_output depending on the contents of the action set:
5827 *
5828 * - OFPP_UNSET, if there is no "output" action.
5829 *
5830 * - The output port, if there is an "output" action and no "group"
5831 * action.
5832 *
5833 * - OFPP_UNSET, if there is a "group" action.
5834 */
5835 if (!ctx->action_set_has_group) {
7e7e8dbb
BP
5836 const struct ofpact *a;
5837 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5838 if (a->type == OFPACT_OUTPUT) {
5839 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5840 } else if (a->type == OFPACT_GROUP) {
c61f3870
BP
5841 ctx->xin->flow.actset_output = OFPP_UNSET;
5842 ctx->action_set_has_group = true;
9055ca9a 5843 break;
c61f3870
BP
5844 }
5845 }
5846 }
5847
7e7e8dbb
BP
5848 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5849}
5850
5851static void
5852xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5853{
5854 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
7fdb60a7
SH
5855}
5856
5857static void
5858xlate_action_set(struct xlate_ctx *ctx)
5859{
2d9b49dd
BP
5860 uint64_t action_list_stub[1024 / 8];
5861 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
7fdb60a7 5862 ofpacts_execute_action_set(&action_list, &ctx->action_set);
ed9c9e3e
JR
5863 /* Clear the action set, as it is not needed any more. */
5864 ofpbuf_clear(&ctx->action_set);
2d9b49dd
BP
5865 if (action_list.size) {
5866 ctx->in_action_set = true;
5867
5868 struct ovs_list *old_trace = ctx->xin->trace;
5869 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5870 "--. Executing action set:");
f5634764 5871 do_xlate_actions(action_list.data, action_list.size, ctx, true, false);
2d9b49dd
BP
5872 ctx->xin->trace = old_trace;
5873
5874 ctx->in_action_set = false;
5875 }
7fdb60a7
SH
5876 ofpbuf_uninit(&action_list);
5877}
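/* Worked example (illustrative, not part of the blamed source): if the
 * action set holds "output:2" and "group:1" (written in either order),
 * ofpacts_execute_action_set() emits them in the OpenFlow-mandated order,
 * and because a group action is present the output action is ignored,
 * matching xlate_write_actions__() setting actset_output to OFPP_UNSET. */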
5878
e672ff9b 5879static void
1d361a81 5880freeze_put_unroll_xlate(struct xlate_ctx *ctx)
e672ff9b 5881{
1d361a81 5882 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
e672ff9b
JR
5883
5884 /* Restore the table_id and rule cookie for a potential PACKET
5885 * IN if needed. */
5886 if (!unroll ||
5887 (ctx->table_id != unroll->rule_table_id
5888 || ctx->rule_cookie != unroll->rule_cookie)) {
1d361a81 5889 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
e672ff9b
JR
5890 unroll->rule_table_id = ctx->table_id;
5891 unroll->rule_cookie = ctx->rule_cookie;
1d361a81 5892 ctx->frozen_actions.header = unroll;
e672ff9b
JR
5893 }
5894}
5895
5896
1d361a81
BP
5897/* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5898 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5899 * present, before any action that may depend on the current table ID or flow
5900 * cookie. */
e672ff9b 5901static void
1d361a81 5902freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
e672ff9b
JR
5903 struct xlate_ctx *ctx)
5904{
c2b283b7 5905 for (; a < end; a = ofpact_next(a)) {
e672ff9b 5906 switch (a->type) {
e672ff9b 5907 case OFPACT_OUTPUT_REG:
aaca4fe0 5908 case OFPACT_OUTPUT_TRUNC:
e672ff9b
JR
5909 case OFPACT_GROUP:
5910 case OFPACT_OUTPUT:
5911 case OFPACT_CONTROLLER:
5912 case OFPACT_DEC_MPLS_TTL:
491e05c2 5913 case OFPACT_DEC_NSH_TTL:
e672ff9b 5914 case OFPACT_DEC_TTL:
83a31283
BP
5915 /* These actions may generate asynchronous messages, which include
5916 * table ID and flow cookie information. */
1d361a81 5917 freeze_put_unroll_xlate(ctx);
e672ff9b
JR
5918 break;
5919
83a31283
BP
5920 case OFPACT_RESUBMIT:
5921 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5922 /* This resubmit action is relative to the current table, so we
5923 * need to track what table that is. */
1d361a81 5924 freeze_put_unroll_xlate(ctx);
83a31283
BP
5925 }
5926 break;
5927
e672ff9b
JR
5928 case OFPACT_SET_TUNNEL:
5929 case OFPACT_REG_MOVE:
5930 case OFPACT_SET_FIELD:
5931 case OFPACT_STACK_PUSH:
5932 case OFPACT_STACK_POP:
5933 case OFPACT_LEARN:
5934 case OFPACT_WRITE_METADATA:
83a31283 5935 case OFPACT_GOTO_TABLE:
e672ff9b
JR
5936 case OFPACT_ENQUEUE:
5937 case OFPACT_SET_VLAN_VID:
5938 case OFPACT_SET_VLAN_PCP:
5939 case OFPACT_STRIP_VLAN:
5940 case OFPACT_PUSH_VLAN:
5941 case OFPACT_SET_ETH_SRC:
5942 case OFPACT_SET_ETH_DST:
5943 case OFPACT_SET_IPV4_SRC:
5944 case OFPACT_SET_IPV4_DST:
5945 case OFPACT_SET_IP_DSCP:
5946 case OFPACT_SET_IP_ECN:
5947 case OFPACT_SET_IP_TTL:
5948 case OFPACT_SET_L4_SRC_PORT:
5949 case OFPACT_SET_L4_DST_PORT:
5950 case OFPACT_SET_QUEUE:
5951 case OFPACT_POP_QUEUE:
5952 case OFPACT_PUSH_MPLS:
5953 case OFPACT_POP_MPLS:
5954 case OFPACT_SET_MPLS_LABEL:
5955 case OFPACT_SET_MPLS_TC:
5956 case OFPACT_SET_MPLS_TTL:
5957 case OFPACT_MULTIPATH:
5958 case OFPACT_BUNDLE:
5959 case OFPACT_EXIT:
5960 case OFPACT_UNROLL_XLATE:
5961 case OFPACT_FIN_TIMEOUT:
5962 case OFPACT_CLEAR_ACTIONS:
5963 case OFPACT_WRITE_ACTIONS:
5964 case OFPACT_METER:
5965 case OFPACT_SAMPLE:
7ae62a67 5966 case OFPACT_CLONE:
f839892a
JS
5967 case OFPACT_ENCAP:
5968 case OFPACT_DECAP:
d4abaff5 5969 case OFPACT_DEBUG_RECIRC:
a934a3dd 5970 case OFPACT_DEBUG_SLOW:
07659514 5971 case OFPACT_CT:
72fe7578 5972 case OFPACT_CT_CLEAR:
9ac0aada 5973 case OFPACT_NAT:
5b34f8fc 5974 case OFPACT_CHECK_PKT_LARGER:
83a31283 5975 /* These may not generate PACKET INs. */
e672ff9b
JR
5976 break;
5977
e672ff9b
JR
5978 case OFPACT_NOTE:
5979 case OFPACT_CONJUNCTION:
83a31283 5980 /* These need not be copied for restoration. */
e672ff9b
JR
5981 continue;
5982 }
5983 /* Copy the action over. */
1d361a81 5984 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
e672ff9b
JR
5985 }
5986}
5987
8e53fe8c 5988static void
f2d105b5
JS
5989put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5990 struct flow_wildcards *wc)
8e53fe8c 5991{
2a754f4a
JS
5992 if (wc->masks.ct_mark) {
5993 struct {
5994 uint32_t key;
5995 uint32_t mask;
5996 } *odp_ct_mark;
5997
5998 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5999 sizeof(*odp_ct_mark));
6000 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
6001 odp_ct_mark->mask = wc->masks.ct_mark;
8e53fe8c
JS
6002 }
6003}
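/* Worked example (illustrative, not part of the blamed source):
 * "ct(commit,exec(set_field:0x1/0x1->ct_mark))" leaves
 * wc->masks.ct_mark == 0x1, so the code above emits the datapath
 * attribute "mark=0x1/0x1": bit 0 of the connection's mark is set and
 * all other bits are preserved. */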
6004
9daf2348 6005static void
f2d105b5
JS
6006put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
6007 struct flow_wildcards *wc)
9daf2348 6008{
2ff8484b 6009 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
9daf2348
JS
6010 struct {
6011 ovs_u128 key;
6012 ovs_u128 mask;
89cf41ec 6013 } odp_ct_label;
9daf2348 6014
89cf41ec
BP
6015 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
6016 odp_ct_label.mask = wc->masks.ct_label;
6017 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
6018 &odp_ct_label, sizeof odp_ct_label);
9daf2348
JS
6019 }
6020}
6021
a13a0209
AT
6022static void
6023put_drop_action(struct ofpbuf *odp_actions, enum xlate_error error)
6024{
6025 nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_DROP, error);
6026}
6027
d787ad39 6028static void
2d9b49dd
BP
6029put_ct_helper(struct xlate_ctx *ctx,
6030 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
d787ad39
JS
6031{
6032 if (ofc->alg) {
40c7b2fc
JS
6033 switch(ofc->alg) {
6034 case IPPORT_FTP:
d787ad39 6035 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
40c7b2fc
JS
6036 break;
6037 case IPPORT_TFTP:
6038 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
6039 break;
6040 default:
2d9b49dd 6041 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
40c7b2fc 6042 break;
d787ad39
JS
6043 }
6044 }
6045}
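/* Worked example (illustrative, not part of the blamed source):
 * "ct(commit,alg=ftp)" serializes as the datapath attribute
 * "helper=ftp".  Only the FTP and TFTP helpers are recognized here; any
 * other alg value is reported as an error instead of being serialized. */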
6046
187bb41f
YHW
6047static void
6048put_ct_timeout(struct ofpbuf *odp_actions, const struct dpif_backer *backer,
6049 const struct flow *flow, struct flow_wildcards *wc,
6050 uint16_t zone_id)
6051{
6052 bool unwildcard;
6053 char *tp_name = NULL;
6054
6055 if (ofproto_dpif_ct_zone_timeout_policy_get_name(backer, zone_id,
6056 ntohs(flow->dl_type), flow->nw_proto, &tp_name, &unwildcard)) {
6057 nl_msg_put_string(odp_actions, OVS_CT_ATTR_TIMEOUT, tp_name);
6058
6059 if (unwildcard) {
6060 /* The underlying datapath requires separate timeout
6061 * policies for different Ethertypes and IP protocols. We
6062 * don't need to unwildcard 'wc->masks.dl_type' since that
6063 * field is always unwildcarded in megaflows. */
6064 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6065 }
6066 }
6067 free(tp_name);
6068}
6069
9ac0aada
JR
6070static void
6071put_ct_nat(struct xlate_ctx *ctx)
6072{
6073 struct ofpact_nat *ofn = ctx->ct_nat_action;
6074 size_t nat_offset;
6075
6076 if (!ofn) {
6077 return;
6078 }
6079
6080 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
6081 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
6082 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
6083 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
6084 if (ofn->flags & NX_NAT_F_PERSISTENT) {
6085 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
6086 }
6087 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
6088 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
6089 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
6090 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
6091 }
6092 if (ofn->range_af == AF_INET) {
73e8bc23 6093 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
9ac0aada
JR
6094 ofn->range.addr.ipv4.min);
6095 if (ofn->range.addr.ipv4.max &&
73e8bc23
BP
6096 (ntohl(ofn->range.addr.ipv4.max)
6097 > ntohl(ofn->range.addr.ipv4.min))) {
6098 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6099 ofn->range.addr.ipv4.max);
9ac0aada
JR
6100 }
6101 } else if (ofn->range_af == AF_INET6) {
6102 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
6103 &ofn->range.addr.ipv6.min,
6104 sizeof ofn->range.addr.ipv6.min);
6105 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
6106 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
6107 sizeof ofn->range.addr.ipv6.max) > 0) {
6108 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6109 &ofn->range.addr.ipv6.max,
6110 sizeof ofn->range.addr.ipv6.max);
6111 }
6112 }
6113 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
6114 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
6115 ofn->range.proto.min);
6116 if (ofn->range.proto.max &&
6117 ofn->range.proto.max > ofn->range.proto.min) {
6118 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
6119 ofn->range.proto.max);
6120 }
6121 }
6122 }
6123 nl_msg_end_nested(ctx->odp_actions, nat_offset);
6124}
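/* Worked example (illustrative, not part of the blamed source):
 * "ct(commit,nat(src=10.0.0.1-10.0.0.9:4000-5000))" is encoded above as
 * a nested OVS_CT_ATTR_NAT carrying OVS_NAT_ATTR_SRC, an
 * OVS_NAT_ATTR_IP_MIN/_MAX pair and an OVS_NAT_ATTR_PROTO_MIN/_MAX pair;
 * a single-address or single-port range omits the corresponding _MAX
 * attribute. */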
6125
07659514 6126static void
feee58b9
AZ
6127compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
6128 bool is_last_action)
07659514 6129{
f2d105b5 6130 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
f2d105b5 6131 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
07659514
JS
6132 size_t ct_offset;
6133 uint16_t zone;
6134
6135 /* Ensure that any prior actions are applied before composing the new
6136 * conntrack action. */
6137 xlate_commit_actions(ctx);
6138
8e53fe8c 6139 /* Process nested actions first, to populate the key. */
9ac0aada 6140 ctx->ct_nat_action = NULL;
f2d105b5 6141 ctx->wc->masks.ct_mark = 0;
f6fabcc6 6142 ctx->wc->masks.ct_label = OVS_U128_ZERO;
feee58b9 6143 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
f5634764 6144 is_last_action, false);
8e53fe8c 6145
6146 if (ofc->zone_src.field) {
6147 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
6148 } else {
6149 zone = ofc->zone_imm;
6150 }
6151
6152 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
6153 if (ofc->flags & NX_CT_F_COMMIT) {
6154 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
6155 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
6156 if (ctx->xbridge->support.ct_eventmask) {
6157 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
975954af 6158 OVS_CT_EVENTMASK_DEFAULT);
adfe7a0b 6159 }
6160 if (ctx->xbridge->support.ct_timeout) {
6161 put_ct_timeout(ctx->odp_actions, ctx->xbridge->ofproto->backer,
6162 &ctx->xin->flow, ctx->wc, zone);
6163 }
6164 }
6165 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
6166 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
6167 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
2d9b49dd 6168 put_ct_helper(ctx, ctx->odp_actions, ofc);
6169 put_ct_nat(ctx);
6170 ctx->ct_nat_action = NULL;
6171 nl_msg_end_nested(ctx->odp_actions, ct_offset);
6172
f2d105b5 6173 ctx->wc->masks.ct_mark = old_ct_mark_mask;
f2d105b5 6174 ctx->wc->masks.ct_label = old_ct_label_mask;
8e53fe8c 6175
f6fabcc6 6176 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
07659514 6177 ctx->conntracked = true;
5fdd80cc 6178 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
07659514 6179 }
6180
6181 /* The ct_* fields are only available in the scope of the 'recirc_table'
6182 * call chain. */
6183 flow_clear_conntrack(&ctx->xin->flow);
6184 xlate_report(ctx, OFT_DETAIL, "Sets the packet to an untracked state, "
6185 "and clears all the conntrack fields.");
f6fabcc6 6186 ctx->conntracked = false;
6187}
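/* Illustrative sketch only: a flow with
 *     actions=ct(commit,zone=5,exec(set_field:1->ct_mark),table=1)
 * is composed here into roughly (dpctl syntax)
 *     ct(commit,zone=5,mark=0x1/0xffffffff),recirc(<id>)
 * where the trailing recirc comes from compose_recirculate_and_fork()
 * because 'recirc_table' is set. */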
6188
6189static void
6190compose_ct_clear_action(struct xlate_ctx *ctx)
6191{
6192 clear_conntrack(ctx);
6193 /* This action originally existed without dpif support. So to preserve
6194 * compatibility, only append it if the dpif supports it. */
6195 if (ctx->xbridge->support.ct_clear) {
6196 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
6197 }
6198}
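/* Note: on a datapath without OVS_ACTION_ATTR_CT_CLEAR the OpenFlow
 * ct_clear action still takes effect on the translation state cleared by
 * clear_conntrack() above; it simply emits no datapath action. */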
6199
6200/* The check_pkt_larger action checks the packet length and stores the
6201 * result in a register bit. We translate this action to the
6202 * datapath action 'check_pkt_len', whose format
6203 * is: 'check_pkt_len(pkt_len, gt(actions), le(actions))'.
6204 *
6205 * We first set the destination register bit to 1 and call
6206 * 'do_xlate_actions' for the case where the packet length is
6207 * greater than the specified packet length.
6208 *
6209 * We then set the destination register bit to 0 and call
6210 * 'do_xlate_actions' for the case where the packet length is less
6211 * than or equal to the specified packet length.
6212 *
6213 * It is possible for freezing to happen in both cases.
6214 */
6215static void
6216xlate_check_pkt_larger(struct xlate_ctx *ctx,
6217 struct ofpact_check_pkt_larger *check_pkt_larger,
6218 const struct ofpact *remaining_acts,
6219 size_t remaining_acts_len)
6220{
6221 union mf_subvalue value;
6222 memset(&value, 0, sizeof value);
6223 if (!ctx->xbridge->support.check_pkt_len) {
6224 uint8_t is_pkt_larger = 0;
6225 if (ctx->xin->packet) {
6226 is_pkt_larger =
6227 dp_packet_size(ctx->xin->packet) > check_pkt_larger->pkt_len;
6228 }
6229 value.u8_val = is_pkt_larger;
6230 mf_write_subfield_flow(&check_pkt_larger->dst, &value,
6231 &ctx->xin->flow);
6232 /* If the datapath doesn't support the check_pkt_len action, set
6233 * the SLOW_ACTION flag. If we don't set SLOW_ACTION, we
6234 * will push a flow to the datapath based on the packet length
6235 * in ctx->xin->packet. For subsequent packets that match the
6236 * same flow, the datapath will apply the actions without considering
6237 * the packet length. This results in wrong actions being applied.
6238 */
6239 ctx->xout->slow |= SLOW_ACTION;
6240 return;
6241 }
6242
6243 struct ofpbuf old_stack = ctx->stack;
6244 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
6245 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
6246 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
6247
6248 struct ofpbuf old_action_set = ctx->action_set;
6249 uint64_t actset_stub[1024 / 8];
6250 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
6251 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
6252
6253 struct flow old_flow = ctx->xin->flow;
6254 xlate_commit_actions(ctx);
6255 struct flow old_base = ctx->base_flow;
6256 bool old_was_mpls = ctx->was_mpls;
6257 bool old_conntracked = ctx->conntracked;
6258
6259 size_t offset = nl_msg_start_nested(ctx->odp_actions,
6260 OVS_ACTION_ATTR_CHECK_PKT_LEN);
6261 nl_msg_put_u16(ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN,
6262 check_pkt_larger->pkt_len);
6263 size_t offset_attr = nl_msg_start_nested(
6264 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
6265 value.u8_val = 1;
6266 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6267 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6268 if (!ctx->freezing) {
6269 xlate_action_set(ctx);
6270 }
6271 if (ctx->freezing) {
6272 finish_freezing(ctx);
6273 }
6274 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6275
6276 ctx->base_flow = old_base;
6277 ctx->was_mpls = old_was_mpls;
6278 ctx->conntracked = old_conntracked;
6279 ctx->xin->flow = old_flow;
6280
6281 /* If the flow translation for the IF_GREATER case required freezing,
6282 * then ctx->exit will be true. Reset it to false so that we can
6283 * translate the 'IF_LESS_EQUAL' case. finish_freezing() has already
6284 * taken care of undoing the changes made for the freeze. */
6285 ctx->exit = false;
6286
6287 offset_attr = nl_msg_start_nested(
6288 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
6289 value.u8_val = 0;
6290 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6291 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6292 if (!ctx->freezing) {
6293 xlate_action_set(ctx);
6294 }
6295 if (ctx->freezing) {
6296 finish_freezing(ctx);
6297 }
6298 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6299 nl_msg_end_nested(ctx->odp_actions, offset);
6300
6301 ofpbuf_uninit(&ctx->action_set);
6302 ctx->action_set = old_action_set;
6303 ofpbuf_uninit(&ctx->stack);
6304 ctx->stack = old_stack;
6305 ctx->base_flow = old_base;
6306 ctx->was_mpls = old_was_mpls;
6307 ctx->conntracked = old_conntracked;
6308 ctx->xin->flow = old_flow;
6309 ctx->exit = true;
6310}
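/* Illustrative sketch only: for an OpenFlow flow along the lines of
 *     actions=check_pkt_larger(1500)->NXM_NX_REG0[0],resubmit(,2)
 * the translation above produces, roughly in dpctl syntax,
 *     check_pkt_len(size=1500,gt(<actions with the reg0 bit set to 1>),
 *                   le(<actions with the reg0 bit set to 0>))
 * with the remaining OpenFlow actions translated once per branch. */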
6311
6312static void
6313rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
6314 struct flow *flow,
6315 struct flow_wildcards *wc)
6316{
6317 wc->masks.packet_type = OVS_BE32_MAX;
6318 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
6319 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
6320 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
6321 flow->packet_type = htonl(PT_ETH);
6322 flow->dl_src = eth_addr_zero;
6323 flow->dl_dst = eth_addr_zero;
6324 flow->dl_type = ethertype;
6325 } else {
1fc11c59 6326 /* Error handling: drop packet. */
f839892a 6327 xlate_report_debug(ctx, OFT_ACTION,
6328 "Dropping packet as encap(ethernet) is not "
6329 "supported for packet type ethernet.");
7873e106 6330 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6331 }
6332}
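/* Illustrative sketch only: encap(ethernet) applied to a PT_IPV4 flow,
 * i.e. packet_type (1,0x800), turns it into PT_ETH with zeroed MAC
 * addresses and dl_type 0x0800; the matching push_eth datapath action is
 * emitted by the next commit. */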
6333
6334/* For an MD2 NSH header, returns a pointer to an ofpbuf with the encoded
6335 * MD2 TLVs provided as encap properties to the encap operation. This
6336 * will be stored as encap_data in the ctx and copied into the push_nsh
6337 * action at the next commit. */
6338static struct ofpbuf *
6339rewrite_flow_push_nsh(struct xlate_ctx *ctx,
6340 const struct ofpact_encap *encap,
6341 struct flow *flow,
6342 struct flow_wildcards *wc)
6343{
6344 ovs_be32 packet_type = flow->packet_type;
6345 const char *ptr = (char *) encap->props;
f59cb331 6346 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
6347 uint8_t md_type = NSH_M_TYPE1;
6348 uint8_t np = 0;
6349 int i;
6350
6351 /* Scan the optional NSH encap TLV properties, if any. */
6352 for (i = 0; i < encap->n_props; i++) {
6353 struct ofpact_ed_prop *prop_ptr =
6354 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
6355 if (prop_ptr->prop_class == OFPPPC_NSH) {
6356 switch (prop_ptr->type) {
6357 case OFPPPT_PROP_NSH_MDTYPE: {
6358 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
6359 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
6360 prop_ptr);
6361 md_type = prop_md_type->md_type;
6362 break;
6363 }
6364 case OFPPPT_PROP_NSH_TLV: {
6365 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
6366 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
6367 prop_ptr);
6368 struct nsh_md2_tlv *md2_ctx =
6369 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
6370 md2_ctx->md_class = tlv_prop->tlv_class;
6371 md2_ctx->type = tlv_prop->tlv_type;
6372 md2_ctx->length = tlv_prop->tlv_len;
6373 size_t len = ROUND_UP(md2_ctx->length, 4);
6374 size_t padding = len - md2_ctx->length;
6375 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
6376 ofpbuf_put_zeros(buf, padding);
6377 break;
6378 }
6379 default:
6380 /* No other NSH encap properties defined yet. */
6381 break;
6382 }
6383 }
6384 ptr += ROUND_UP(prop_ptr->len, 8);
6385 }
f59cb331 6386 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
6387 ofpbuf_delete(buf);
6388 buf = NULL;
6389 }
6390
6391 /* Determine the Next Protocol field for NSH header. */
6392 switch (ntohl(packet_type)) {
6393 case PT_ETH:
6394 np = NSH_P_ETHERNET;
6395 break;
6396 case PT_IPV4:
6397 np = NSH_P_IPV4;
6398 break;
6399 case PT_IPV6:
6400 np = NSH_P_IPV6;
6401 break;
6402 case PT_NSH:
6403 np = NSH_P_NSH;
6404 break;
6405 default:
6406 /* Error handling: drop packet. */
6407 xlate_report_debug(ctx, OFT_ACTION,
6408 "Dropping packet as encap(nsh) is not "
6409 "supported for packet type (%d,0x%x)",
6410 pt_ns(packet_type), pt_ns_type(packet_type));
7873e106 6411 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6412 return buf;
6413 }
6414 /* Note that we have matched on packet_type! */
6415 wc->masks.packet_type = OVS_BE32_MAX;
6416
6417 /* Reset all current flow packet headers. */
6418 memset(&flow->dl_dst, 0,
6419 sizeof(struct flow) - offsetof(struct flow, dl_dst));
6420
6421 /* Populate the flow with the new NSH header. */
6422 flow->packet_type = htonl(PT_NSH);
6423 flow->dl_type = htons(ETH_TYPE_NSH);
6424 flow->nsh.flags = 0;
6425 flow->nsh.ttl = 63;
1fc11c59 6426 flow->nsh.np = np;
17553f27 6427 flow->nsh.path_hdr = htonl(255);
6428
6429 if (md_type == NSH_M_TYPE1) {
6430 flow->nsh.mdtype = NSH_M_TYPE1;
f59cb331 6431 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
6432 if (buf) {
6433 /* Drop any MD2 context TLVs. */
6434 ofpbuf_delete(buf);
6435 buf = NULL;
6436 }
6437 } else if (md_type == NSH_M_TYPE2) {
6438 flow->nsh.mdtype = NSH_M_TYPE2;
6439 }
17553f27 6440 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
6441
6442 return buf;
6443}
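/* Illustrative sketch only: an action along the lines of
 *     encap(nsh(md_type=2,tlv(0x1000,10,0x12345678)))
 * takes the MD2 branch above: the TLV bytes are buffered as 'encap_data'
 * and travel into the push_nsh datapath action at the next commit. */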
6444
6445static void
6446xlate_generic_encap_action(struct xlate_ctx *ctx,
6447 const struct ofpact_encap *encap)
6448{
6449 struct flow *flow = &ctx->xin->flow;
6450 struct flow_wildcards *wc = ctx->wc;
1fc11c59 6451 struct ofpbuf *encap_data = NULL;
6452
6453 /* Ensure that any pending actions on the inner packet are applied before
6454 * rewriting the flow. */
6455 xlate_commit_actions(ctx);
6456
6457 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
6458 switch (ntohl(encap->new_pkt_type)) {
6459 case PT_ETH:
6460 rewrite_flow_encap_ethernet(ctx, flow, wc);
6461 break;
1fc11c59 6462 case PT_NSH:
f59cb331 6463 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
1fc11c59 6464 break;
f839892a 6465 default:
6466 /* New packet type was checked during decoding. */
6467 OVS_NOT_REACHED();
6468 }
6469
6470 if (!ctx->error) {
6471 /* The actual encap datapath action will be generated at next commit. */
6472 ctx->pending_encap = true;
1fc11c59 6473 ctx->encap_data = encap_data;
6474 }
6475}
6476
6477/* Returns true if packet must be recirculated after decapsulation. */
6478static bool
6479xlate_generic_decap_action(struct xlate_ctx *ctx,
6480 const struct ofpact_decap *decap OVS_UNUSED)
6481{
6482 struct flow *flow = &ctx->xin->flow;
6483
6484 /* Ensure that any pending actions on the current packet are applied
6485 * before generating the decap action. */
6486 xlate_commit_actions(ctx);
6487
6488 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
6489 switch (ntohl(flow->packet_type)) {
6490 case PT_ETH:
6491 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
6492 /* Error handling: drop packet. */
6493 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
6494 "decap Ethernet if VLAN is present.");
7873e106 6495 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6496 } else {
6497 /* Just change the packet_type.
6498 * Delay generating pop_eth to the next commit. */
6499 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
6500 ntohs(flow->dl_type)));
6501 ctx->wc->masks.dl_type = OVS_BE16_MAX;
6502 }
6503 return false;
1fc11c59 6504 case PT_NSH:
f59cb331 6505 /* The pop_nsh action is generated at the commit executed as
6506 * part of freezing the ctx for recirculation. Here we just set
6507 * the new packet type based on the NSH next protocol field. */
6508 switch (flow->nsh.np) {
6509 case NSH_P_ETHERNET:
6510 flow->packet_type = htonl(PT_ETH);
6511 break;
6512 case NSH_P_IPV4:
6513 flow->packet_type = htonl(PT_IPV4);
6514 break;
6515 case NSH_P_IPV6:
6516 flow->packet_type = htonl(PT_IPV6);
6517 break;
6518 case NSH_P_NSH:
6519 flow->packet_type = htonl(PT_NSH);
6520 break;
6521 default:
6522 /* Error handling: drop packet. */
6523 xlate_report_debug(ctx, OFT_ACTION,
6524 "Dropping packet as NSH next protocol %d "
6525 "is not supported", flow->nsh.np);
7873e106 6526 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6527 return false;
6528 break;
6529 }
6530 ctx->wc->masks.nsh.np = UINT8_MAX;
88ec1e0a 6531 ctx->pending_decap = true;
6532 /* Trigger recirculation. */
6533 return true;
f839892a 6534 default:
6535 /* Error handling: drop packet. */
6536 xlate_report_debug(
6537 ctx, OFT_ACTION,
6538 "Dropping packet as the decap() does not support "
6539 "packet type (%d,0x%x)",
6540 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
7873e106 6541 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6542 return false;
6543 }
6544}
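/* Illustrative sketch only: decap() on a PT_ETH flow just schedules a
 * pop_eth for the next commit, whereas decap() on a PT_NSH flow returns
 * true above so that the caller freezes the pipeline and the inner
 * packet is re-parsed after recirculation. */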
6545
6546static void
6547recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
6548{
6549 /* No need to recirculate if already exiting. */
6550 if (ctx->exit) {
6551 return;
6552 }
6553
6554 /* Do not consider recirculating unless the packet was previously MPLS. */
6555 if (!ctx->was_mpls) {
6556 return;
6557 }
6558
6559 /* Special case these actions, only recirculating if necessary.
6560 * This avoids the overhead of recirculation in common use-cases.
6561 */
6562 switch (a->type) {
6563
6564 /* Output actions do not require recirculation. */
6565 case OFPACT_OUTPUT:
aaca4fe0 6566 case OFPACT_OUTPUT_TRUNC:
6567 case OFPACT_ENQUEUE:
6568 case OFPACT_OUTPUT_REG:
6569 /* Set actions that don't touch L3+ fields do not require recirculation. */
6570 case OFPACT_SET_VLAN_VID:
6571 case OFPACT_SET_VLAN_PCP:
6572 case OFPACT_SET_ETH_SRC:
6573 case OFPACT_SET_ETH_DST:
6574 case OFPACT_SET_TUNNEL:
6575 case OFPACT_SET_QUEUE:
6576 /* If the actions of a group require recirculation, that can be
6577 * detected when translating them. */
6578 case OFPACT_GROUP:
6579 return;
6580
6581 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
6582 case OFPACT_SET_FIELD:
6583 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6584 break;
6585 }
6586 return;
6587
6588 /* For simplicity, recirculate in all other cases. */
6589 case OFPACT_CONTROLLER:
6590 case OFPACT_BUNDLE:
6591 case OFPACT_STRIP_VLAN:
6592 case OFPACT_PUSH_VLAN:
6593 case OFPACT_SET_IPV4_SRC:
6594 case OFPACT_SET_IPV4_DST:
6595 case OFPACT_SET_IP_DSCP:
6596 case OFPACT_SET_IP_ECN:
6597 case OFPACT_SET_IP_TTL:
6598 case OFPACT_SET_L4_SRC_PORT:
6599 case OFPACT_SET_L4_DST_PORT:
6600 case OFPACT_REG_MOVE:
6601 case OFPACT_STACK_PUSH:
6602 case OFPACT_STACK_POP:
6603 case OFPACT_DEC_TTL:
6604 case OFPACT_SET_MPLS_LABEL:
6605 case OFPACT_SET_MPLS_TC:
6606 case OFPACT_SET_MPLS_TTL:
6607 case OFPACT_DEC_MPLS_TTL:
6608 case OFPACT_PUSH_MPLS:
6609 case OFPACT_POP_MPLS:
6610 case OFPACT_POP_QUEUE:
6611 case OFPACT_FIN_TIMEOUT:
6612 case OFPACT_RESUBMIT:
6613 case OFPACT_LEARN:
6614 case OFPACT_CONJUNCTION:
6615 case OFPACT_MULTIPATH:
6616 case OFPACT_NOTE:
6617 case OFPACT_EXIT:
6618 case OFPACT_SAMPLE:
7ae62a67 6619 case OFPACT_CLONE:
6620 case OFPACT_ENCAP:
6621 case OFPACT_DECAP:
491e05c2 6622 case OFPACT_DEC_NSH_TTL:
6623 case OFPACT_UNROLL_XLATE:
6624 case OFPACT_CT:
72fe7578 6625 case OFPACT_CT_CLEAR:
6626 case OFPACT_NAT:
6627 case OFPACT_DEBUG_RECIRC:
a934a3dd 6628 case OFPACT_DEBUG_SLOW:
6629 case OFPACT_METER:
6630 case OFPACT_CLEAR_ACTIONS:
6631 case OFPACT_WRITE_ACTIONS:
6632 case OFPACT_WRITE_METADATA:
6633 case OFPACT_GOTO_TABLE:
5b34f8fc 6634 case OFPACT_CHECK_PKT_LARGER:
6635 default:
6636 break;
6637 }
6638
6639 /* Recirculate */
6640 ctx_trigger_freeze(ctx);
6641}
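/* Illustrative sketch only: with "actions=pop_mpls:0x0800,dec_ttl" the
 * packet was MPLS when translation started ('was_mpls' is set), so the
 * datapath has not yet parsed the exposed IP header; dec_ttl falls into
 * the default arm above and triggers the freeze/recirculation before it
 * is translated. */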
6642
6643static void
6644xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6645{
6646 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6647 xlate_report_subfield(ctx, &a->dst);
6648}
6649
6650static void
6651xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6652{
6653 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6654 xlate_report_subfield(ctx, &a->subfield);
6655 } else {
6656 xlate_report_error(ctx, "stack underflow");
6657 }
6658}
6659
6660/* Restore translation context data that was stored earlier. */
6661static void
6662xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6663 const struct ofpact_unroll_xlate *a)
6664{
6665 ctx->table_id = a->rule_table_id;
6666 ctx->rule_cookie = a->rule_cookie;
6667 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6668 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6669}
6670
6671static void
6672do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6673 struct xlate_ctx *ctx, bool is_last_action,
6674 bool group_bucket_action)
9583bc14 6675{
49a73e0c 6676 struct flow_wildcards *wc = ctx->wc;
33bf9176 6677 struct flow *flow = &ctx->xin->flow;
6678 const struct ofpact *a;
6679
6680 /* dl_type already in the mask, not set below. */
6681
6682 if (!ofpacts_len) {
6683 xlate_report(ctx, OFT_ACTION, "drop");
6684 return;
6685 }
6686
6687 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6688 struct ofpact_controller *controller;
6689 const struct ofpact_metadata *metadata;
6690 const struct ofpact_set_field *set_field;
6691 const struct mf_field *mf;
6692 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6693 && ctx->action_set.size;
9583bc14 6694
6695 if (ctx->error) {
6696 break;
6697 }
6698
6699 recirc_for_mpls(a, ctx);
6700
6701 if (ctx->exit) {
6702 /* Check if we need to store the remaining actions for later
6703 * execution. */
6704 if (ctx->freezing) {
6705 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
6706 ctx);
6707 }
6708 break;
6709 }
6710
6711 if (OVS_UNLIKELY(ctx->xin->trace)) {
6712 struct ds s = DS_EMPTY_INITIALIZER;
6713 struct ofpact_format_params fp = { .s = &s };
6714 ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
6715 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6716 ds_destroy(&s);
6717 }
6718
6719 switch (a->type) {
6720 case OFPACT_OUTPUT:
6721 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
8b496c72 6722 ofpact_get_OUTPUT(a)->max_len, true, last,
f5634764 6723 false, group_bucket_action);
6724 break;
6725
7395c052 6726 case OFPACT_GROUP:
feee58b9 6727 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
1d741d6d 6728 /* Group could not be found. */
6729
6730 /* XXX: Terminates action list translation, but does not
6731 * terminate the pipeline. */
6732 return;
6733 }
6734 break;
6735
6736 case OFPACT_CONTROLLER:
6737 controller = ofpact_get_CONTROLLER(a);
6738 if (controller->pause) {
6739 ctx->pause = controller;
6740 ctx_trigger_freeze(ctx);
6741 a = ofpact_next(a);
6742 } else {
6743 xlate_controller_action(ctx, controller->max_len,
6744 controller->reason,
6745 controller->controller_id,
206ddb9a 6746 controller->provider_meter_id,
6747 controller->userdata,
6748 controller->userdata_len);
77ab5fd2 6749 }
6750 break;
6751
6752 case OFPACT_ENQUEUE:
6753 memset(&wc->masks.skb_priority, 0xff,
6754 sizeof wc->masks.skb_priority);
6755 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last,
6756 group_bucket_action);
6757 break;
6758
6759 case OFPACT_SET_VLAN_VID:
6760 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6761 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6762 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
6763 if (!flow->vlans[0].tpid) {
6764 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6765 }
6766 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6767 flow->vlans[0].tci |=
6768 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6769 htons(VLAN_CFI));
ca287d20 6770 }
6771 break;
6772
6773 case OFPACT_SET_VLAN_PCP:
6774 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6775 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
ca287d20 6776 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
6777 if (!flow->vlans[0].tpid) {
6778 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6779 }
6780 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6781 flow->vlans[0].tci |=
6782 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6783 << VLAN_PCP_SHIFT) | VLAN_CFI);
ca287d20 6784 }
6785 break;
6786
6787 case OFPACT_STRIP_VLAN:
f0fb825a 6788 flow_pop_vlan(flow, wc);
6789 break;
6790
6791 case OFPACT_PUSH_VLAN:
6792 flow_push_vlan_uninit(flow, wc);
6793 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6794 flow->vlans[0].tci = htons(VLAN_CFI);
6795 break;
6796
6797 case OFPACT_SET_ETH_SRC:
6798 WC_MASK_FIELD(wc, dl_src);
6799 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
6800 break;
6801
6802 case OFPACT_SET_ETH_DST:
6803 WC_MASK_FIELD(wc, dl_dst);
6804 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
6805 break;
6806
6807 case OFPACT_SET_IPV4_SRC:
33bf9176 6808 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6809 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 6810 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6811 }
6812 break;
6813
6814 case OFPACT_SET_IPV4_DST:
33bf9176 6815 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 6816 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 6817 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6818 }
6819 break;
6820
6821 case OFPACT_SET_IP_DSCP:
6822 if (is_ip_any(flow)) {
f47ea021 6823 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 6824 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 6825 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
6826 }
6827 break;
6828
6829 case OFPACT_SET_IP_ECN:
6830 if (is_ip_any(flow)) {
6831 wc->masks.nw_tos |= IP_ECN_MASK;
6832 flow->nw_tos &= ~IP_ECN_MASK;
6833 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6834 }
6835 break;
6836
6837 case OFPACT_SET_IP_TTL:
6838 if (is_ip_any(flow)) {
6839 wc->masks.nw_ttl = 0xff;
6840 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6841 }
6842 break;
6843
9583bc14 6844 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 6845 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6846 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6847 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 6848 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6849 }
6850 break;
6851
6852 case OFPACT_SET_L4_DST_PORT:
b8778a0d 6853 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6854 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6855 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 6856 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6857 }
6858 break;
6859
6860 case OFPACT_RESUBMIT:
6861 /* Freezing complicates resubmit. Some action in the flow
6862 * entry found by resubmit might trigger freezing. If that
6863 * happens, then we do not want to execute the resubmit again
6864 * during thawing, so we skip back to the head of the loop,
6865 * adding only the actions that follow the resubmit to the
6866 * frozen actions.
6867 */
feee58b9 6868 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6b1c5734 6869 continue;
6870
6871 case OFPACT_SET_TUNNEL:
33bf9176 6872 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
6873 break;
6874
6875 case OFPACT_SET_QUEUE:
6876 memset(&wc->masks.skb_priority, 0xff,
6877 sizeof wc->masks.skb_priority);
6878 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6879 break;
6880
6881 case OFPACT_POP_QUEUE:
16194afd
DDP
6882 memset(&wc->masks.skb_priority, 0xff,
6883 sizeof wc->masks.skb_priority);
6884 if (flow->skb_priority != ctx->orig_skb_priority) {
6885 flow->skb_priority = ctx->orig_skb_priority;
6886 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6887 flow->skb_priority);
6888 }
6889 break;
6890
6891 case OFPACT_REG_MOVE:
2d9b49dd 6892 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
6893 break;
6894
6895 case OFPACT_SET_FIELD:
6896 set_field = ofpact_get_SET_FIELD(a);
6897 mf = set_field->field;
b2dd70be 6898
6899 /* Set the field only if the packet actually has it. */
6900 if (mf_are_prereqs_ok(mf, flow, wc)) {
6901 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6902 mf_set_flow_value_masked(mf, set_field->value,
6903 ofpact_set_field_mask(set_field),
6904 flow);
6905 } else {
6906 xlate_report(ctx, OFT_WARN,
6907 "unmet prerequisites for %s, set_field ignored",
6908 mf->name);
6909
b8778a0d 6910 }
6911 break;
6912
9583bc14 6913 case OFPACT_STACK_PUSH:
6914 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6915 &ctx->stack);
6916 break;
6917
6918 case OFPACT_STACK_POP:
2d9b49dd 6919 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
6920 break;
6921
6922 case OFPACT_PUSH_MPLS:
8bfd0fda 6923 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
6924 break;
6925
6926 case OFPACT_POP_MPLS:
8bfd0fda 6927 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6928 break;
6929
097d4939 6930 case OFPACT_SET_MPLS_LABEL:
6931 compose_set_mpls_label_action(
6932 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
1d741d6d 6933 break;
6934
6935 case OFPACT_SET_MPLS_TC:
8bfd0fda 6936 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
6937 break;
6938
9583bc14 6939 case OFPACT_SET_MPLS_TTL:
8bfd0fda 6940 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
6941 break;
6942
6943 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 6944 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 6945 return;
6946 }
6947 break;
6948
6949 case OFPACT_DEC_NSH_TTL:
6950 if (compose_dec_nsh_ttl_action(ctx)) {
6951 return;
6952 }
6953 break;
6954
9583bc14 6955 case OFPACT_DEC_TTL:
f74e7df7 6956 wc->masks.nw_ttl = 0xff;
9583bc14 6957 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 6958 return;
6959 }
6960 break;
6961
6962 case OFPACT_NOTE:
6963 /* Nothing to do. */
6964 break;
6965
6966 case OFPACT_MULTIPATH:
33bf9176 6967 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
2d9b49dd 6968 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
6969 break;
6970
6971 case OFPACT_BUNDLE:
6972 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last,
6973 group_bucket_action);
6974 break;
6975
6976 case OFPACT_OUTPUT_REG:
6977 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last,
6978 group_bucket_action);
6979 break;
6980
6981 case OFPACT_OUTPUT_TRUNC:
6982 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
6983 ofpact_get_OUTPUT_TRUNC(a)->max_len, last,
6984 group_bucket_action);
6985 break;
6986
6987 case OFPACT_LEARN:
6988 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6989 break;
6990
2d9b49dd 6991 case OFPACT_CONJUNCTION:
6992 /* A flow with a "conjunction" action represents part of a special
6993 * kind of "set membership match". Such a flow should not actually
6994 * get executed, but it could via, say, a "packet-out", even though
6995 * that wouldn't be useful. Log it to help debugging. */
2d9b49dd 6996 xlate_report_error(ctx, "executing no-op conjunction action");
6997 break;
6998
6999 case OFPACT_EXIT:
7000 ctx->exit = true;
7001 break;
7002
7003 case OFPACT_UNROLL_XLATE:
7004 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
e672ff9b 7005 break;
2d9b49dd 7006
9583bc14 7007 case OFPACT_FIN_TIMEOUT:
33bf9176 7008 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
7009 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
7010 break;
7011
7012 case OFPACT_CLEAR_ACTIONS:
2d9b49dd 7013 xlate_report_action_set(ctx, "was");
7fdb60a7 7014 ofpbuf_clear(&ctx->action_set);
7015 ctx->xin->flow.actset_output = OFPP_UNSET;
7016 ctx->action_set_has_group = false;
7017 break;
7018
7019 case OFPACT_WRITE_ACTIONS:
7e7e8dbb 7020 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
2d9b49dd 7021 xlate_report_action_set(ctx, "is");
7022 break;
7023
7024 case OFPACT_WRITE_METADATA:
7025 metadata = ofpact_get_WRITE_METADATA(a);
7026 flow->metadata &= ~metadata->mask;
7027 flow->metadata |= metadata->metadata & metadata->mask;
7028 break;
7029
638a19b0 7030 case OFPACT_METER:
076caa2f 7031 xlate_meter_action(ctx, ofpact_get_METER(a));
7032 break;
7033
9583bc14 7034 case OFPACT_GOTO_TABLE: {
9583bc14 7035 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14 7036
7037 ovs_assert(ctx->table_id < ogt->table_id);
7038
4468099e 7039 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
7040 ogt->table_id, true, true, false, last,
7041 do_xlate_actions);
7042 break;
7043 }
7044
7045 case OFPACT_SAMPLE:
7046 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
7047 break;
d4abaff5 7048
7ae62a67 7049 case OFPACT_CLONE:
feee58b9 7050 compose_clone(ctx, ofpact_get_CLONE(a), last);
7051 break;
7052
7053 case OFPACT_ENCAP:
7054 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
7055 break;
7056
7057 case OFPACT_DECAP: {
7058 bool recirc_needed =
7059 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
7060 if (!ctx->error && recirc_needed) {
7061 /* Recirculate for parsing of inner packet. */
7062 ctx_trigger_freeze(ctx);
7063 /* Then continue with next action. */
7064 a = ofpact_next(a);
7065 }
7066 break;
7067 }
7068
07659514 7069 case OFPACT_CT:
feee58b9 7070 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
7071 break;
7072
72fe7578 7073 case OFPACT_CT_CLEAR:
1fe178d2 7074 compose_ct_clear_action(ctx);
7075 break;
7076
7077 case OFPACT_NAT:
7078 /* This will be processed by compose_conntrack_action(). */
7079 ctx->ct_nat_action = ofpact_get_NAT(a);
7080 break;
7081
d4abaff5 7082 case OFPACT_DEBUG_RECIRC:
1d361a81 7083 ctx_trigger_freeze(ctx);
7084 a = ofpact_next(a);
7085 break;
7086
7087 case OFPACT_DEBUG_SLOW:
7088 ctx->xout->slow |= SLOW_ACTION;
7089 break;
7090
7091 case OFPACT_CHECK_PKT_LARGER: {
7092 if (last) {
7093 /* If this is the last action, then there is no need to
7094 * translate it. */
7095 break;
7096 }
7097 const struct ofpact *remaining_acts = ofpact_next(a);
7098 size_t remaining_acts_len = ofpact_remaining_len(remaining_acts,
7099 ofpacts,
7100 ofpacts_len);
7101 xlate_check_pkt_larger(ctx, ofpact_get_CHECK_PKT_LARGER(a),
7102 remaining_acts, remaining_acts_len);
7103 break;
7104 }
9583bc14 7105 }
7106
7107 /* Check if we need to store this and the remaining actions for later
7108 * execution. */
7109 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
7110 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
7111 break;
7112 }
9583bc14 7113 }
7114}
7115
7116void
7117xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
7118 ovs_version_t version, const struct flow *flow,
7119 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
7120 const struct dp_packet *packet, struct flow_wildcards *wc,
7121 struct ofpbuf *odp_actions)
7122{
7123 xin->ofproto = ofproto;
1f4a8933 7124 xin->tables_version = version;
9583bc14 7125 xin->flow = *flow;
8d8ab6c2 7126 xin->upcall_flow = flow;
cc377352 7127 xin->flow.in_port.ofp_port = in_port;
c61f3870 7128 xin->flow.actset_output = OFPP_UNSET;
9583bc14 7129 xin->packet = packet;
df70a773 7130 xin->allow_side_effects = packet != NULL;
9583bc14 7131 xin->rule = rule;
b256dc52 7132 xin->xcache = NULL;
7133 xin->ofpacts = NULL;
7134 xin->ofpacts_len = 0;
7135 xin->tcp_flags = tcp_flags;
2d9b49dd 7136 xin->trace = NULL;
9583bc14 7137 xin->resubmit_stats = NULL;
790c5d26 7138 xin->depth = 0;
cdd42eda 7139 xin->resubmits = 0;
49a73e0c 7140 xin->wc = wc;
1520ef4f 7141 xin->odp_actions = odp_actions;
331c07ac 7142 xin->in_packet_out = false;
e6bc8e74 7143 xin->recirc_queue = NULL;
00135b86 7144 xin->xport_uuid = UUID_ZERO;
7145
7146 /* Do recirc lookup. */
1d361a81 7147 xin->frozen_state = NULL;
7148 if (flow->recirc_id) {
7149 const struct recirc_id_node *node
7150 = recirc_id_node_find(flow->recirc_id);
7151 if (node) {
1d361a81 7152 xin->frozen_state = &node->state;
7153 }
7154 }
7155}
7156
7157void
7158xlate_out_uninit(struct xlate_out *xout)
7159{
e672ff9b 7160 if (xout) {
fbf5d6ec 7161 recirc_refs_unref(&xout->recircs);
7162 }
7163}
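/* Minimal caller sketch (assumptions: a valid 'ofproto', 'flow', 'packet',
 * table 'version' and 'in_port'; error handling elided), mirroring how the
 * upcall path drives a translation:
 *
 *     struct xlate_in xin;
 *     struct xlate_out xout;
 *     uint64_t stub[1024 / 8];
 *     struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
 *     struct flow_wildcards wc;
 *
 *     xlate_in_init(&xin, ofproto, version, &flow, in_port, NULL, 0,
 *                   packet, &wc, &odp_actions);
 *     if (xlate_actions(&xin, &xout) == XLATE_OK) {
 *         ... install or execute odp_actions ...
 *     }
 *     xlate_out_uninit(&xout);
 *     ofpbuf_uninit(&odp_actions);
 */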
9583bc14 7164\f
7165static struct skb_priority_to_dscp *
7166get_skb_priority(const struct xport *xport, uint32_t skb_priority)
7167{
7168 struct skb_priority_to_dscp *pdscp;
7169 uint32_t hash;
7170
7171 hash = hash_int(skb_priority, 0);
7172 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
7173 if (pdscp->skb_priority == skb_priority) {
7174 return pdscp;
7175 }
7176 }
7177 return NULL;
7178}
7179
7180static bool
7181dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
7182 uint8_t *dscp)
7183{
7184 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
7185 *dscp = pdscp ? pdscp->dscp : 0;
7186 return pdscp != NULL;
7187}
7188
7189static size_t
7190count_skb_priorities(const struct xport *xport)
7191{
7192 return hmap_count(&xport->skb_priorities);
7193}
7194
7195static void
7196clear_skb_priorities(struct xport *xport)
7197{
4ec3d7c7 7198 struct skb_priority_to_dscp *pdscp;
55954f6e 7199
4ec3d7c7 7200 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
7201 free(pdscp);
7202 }
7203}
7204
7205static bool
7206actions_output_to_local_port(const struct xlate_ctx *ctx)
7207{
46c88433 7208 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
7209 const struct nlattr *a;
7210 unsigned int left;
7211
7212 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
7213 ctx->odp_actions->size) {
7214 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
7215 && nl_attr_get_odp_port(a) == local_odp_port) {
7216 return true;
7217 }
7218 }
7219 return false;
7220}
9583bc14 7221
5e2a6702 7222#if defined(__linux__)
7223/* Returns the maximum number of packets that the Linux kernel is willing to
7224 * queue up internally to certain kinds of software-implemented ports, or the
7225 * default (and rarely modified) value if it cannot be determined. */
7226static int
7227netdev_max_backlog(void)
7228{
7229 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
7230 static int max_backlog = 1000; /* The normal default value. */
7231
7232 if (ovsthread_once_start(&once)) {
7233 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
7234 FILE *stream;
7235 int n;
7236
7237 stream = fopen(filename, "r");
7238 if (!stream) {
120c348f 7239 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7240 } else {
7241 if (fscanf(stream, "%d", &n) != 1) {
7242 VLOG_WARN("%s: read error", filename);
7243 } else if (n <= 100) {
7244 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
7245 } else {
7246 max_backlog = n;
7247 }
7248 fclose(stream);
7249 }
7250 ovsthread_once_done(&once);
7251
7252 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
7253 }
7254
7255 return max_backlog;
7256}
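/* For reference, this limit can be inspected or raised from userspace
 * with "sysctl net.core.netdev_max_backlog"; stock kernels typically
 * report 1000, which matches the fallback above. */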
7257
7258/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
7259 * 'odp_actions'. */
7260static int
7261count_output_actions(const struct ofpbuf *odp_actions)
7262{
7263 const struct nlattr *a;
7264 size_t left;
7265 int n = 0;
7266
6fd6ed71 7267 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7268 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
7269 n++;
7270 }
7271 }
7272 return n;
7273}
5e2a6702 7274#endif /* defined(__linux__) */
7275
7276/* Returns true if 'odp_actions' contains more output actions than the datapath
7277 * can reliably handle in one go. On Linux, this is the value of the
7278 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
7279 * packets that the kernel is willing to queue up for processing while the
7280 * datapath is processing a set of actions. */
7281static bool
5e2a6702 7282too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7283{
7284#ifdef __linux__
6fd6ed71 7285 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7286 && count_output_actions(odp_actions) > netdev_max_backlog());
7287#else
7288 /* OSes other than Linux might have similar limits, but we don't know how
7289 * to determine them. */
7290 return false;
7291#endif
7292}
7293
7294static void
7295xlate_wc_init(struct xlate_ctx *ctx)
7296{
7297 flow_wildcards_init_catchall(ctx->wc);
7298
7299 /* Some fields we consider to always be examined. */
3d4b2e6e 7300 WC_MASK_FIELD(ctx->wc, packet_type);
5e2e998a 7301 WC_MASK_FIELD(ctx->wc, in_port);
29b5c0c3 7302 WC_MASK_FIELD(ctx->wc, dl_type);
234c3da9 7303 if (is_ip_any(&ctx->xin->flow)) {
5e2e998a 7304 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
7305 }
7306
7307 if (ctx->xbridge->support.odp.recirc) {
7308 /* Always exactly match recirc_id when datapath supports
7309 * recirculation. */
5e2e998a 7310 WC_MASK_FIELD(ctx->wc, recirc_id);
7311 }
7312
7313 if (ctx->xbridge->netflow) {
7314 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
7315 }
7316
7317 tnl_wc_init(&ctx->xin->flow, ctx->wc);
7318}
7319
7320static void
7321xlate_wc_finish(struct xlate_ctx *ctx)
7322{
7323 int i;
7324
7325 /* Clear the metadata and register wildcard masks, because we won't
7326 * use non-header fields as part of the cache. */
7327 flow_wildcards_clear_non_packet_fields(ctx->wc);
7328
7329 /* Wildcard Ethernet address fields if the original packet type was not
7330 * Ethernet.
7331 *
7332 * (The Ethertype field is used even when the original packet type is not
7333 * Ethernet.) */
7334 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
7335 ctx->wc->masks.dl_dst = eth_addr_zero;
7336 ctx->wc->masks.dl_src = eth_addr_zero;
7337 }
7338
7339 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
7340 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
7341 * represent these fields. The datapath interface, on the other hand,
7342 * represents them with just 8 bits each. This means that if the high
7343 * 8 bits of the masks for these fields somehow become set, then they
7344 * will get chopped off by a round trip through the datapath, and
7345 * revalidation will spot that as an inconsistency and delete the flow.
7346 * Avoid the problem here by making sure that only the low 8 bits of
7347 * either field can be unwildcarded for ICMP.
7348 */
a75636c8 7349 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
7350 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
7351 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
7352 }
7353 /* The VLAN_TCI CFI bit must be matched if any other TCI bits are. */
7354 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
7355 if (ctx->wc->masks.vlans[i].tci) {
7356 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
7357 }
234c3da9 7358 }
7359
7360 /* The classifier might return masks that match on tp_src and tp_dst even
7361 * for later fragments. This happens because there might be flows that
7362 * match on tp_src or tp_dst without matching on the frag bits, because
7363 * that is not a prerequisite in OpenFlow. Since it is a prerequisite
7364 * for datapath flows, and since tp_src and tp_dst will always be 0,
7365 * wildcard the fields here. */
7366 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
7367 ctx->wc->masks.tp_src = 0;
7368 ctx->wc->masks.tp_dst = 0;
7369 }
7370
7371 /* Clear flow wildcard bits for fields that are not present
7372 * in the original packet header. These wildcards may get set
7373 * due to push/set_field actions, which results in frequent
7374 * invalidation of datapath flows by the revalidator thread. */
7375
7376 /* Clear mpls label wc bits if original packet is non-mpls. */
7377 if (!eth_type_mpls(ctx->xin->upcall_flow->dl_type)) {
7378 for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
7379 ctx->wc->masks.mpls_lse[i] = 0;
7380 }
7381 }
7382 /* Clear vlan header wc bits if original packet does not have
7383 * vlan header. */
7384 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
7385 if (!eth_type_vlan(ctx->xin->upcall_flow->vlans[i].tpid)) {
7386 ctx->wc->masks.vlans[i].tpid = 0;
7387 ctx->wc->masks.vlans[i].tci = 0;
7388 }
7389 }
7390}
7391
7392/* Translates the flow, actions, or rule in 'xin' into datapath actions in
7393 * 'xout'.
56450a41 7394 * The caller must take responsibility for eventually freeing 'xout', with
7395 * xlate_out_uninit().
7396 * Returns 'XLATE_OK' if translation was successful. In case of an error an
7397 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
7398 * so that most callers may ignore the return value and transparently install a
7399 * drop flow when the translation fails. */
7400enum xlate_error
84f0f298 7401xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
9583bc14 7402{
7403 *xout = (struct xlate_out) {
7404 .slow = 0,
fbf5d6ec 7405 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
7406 };
7407
84f0f298 7408 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7409 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
7410 if (!xbridge) {
fff1b9c0 7411 return XLATE_BRIDGE_NOT_FOUND;
7412 }
7413
7414 struct flow *flow = &xin->flow;
7415
84cf3c1f 7416 uint8_t stack_stub[1024];
bb00fdef 7417 uint64_t action_set_stub[1024 / 8];
1d361a81 7418 uint64_t frozen_actions_stub[1024 / 8];
7419 uint64_t actions_stub[256 / 8];
7420 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
7421 struct xlate_ctx ctx = {
7422 .xin = xin,
7423 .xout = xout,
7424 .base_flow = *flow,
c2b878e0 7425 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
0506f184 7426 .xcfg = xcfg,
7427 .xbridge = xbridge,
7428 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
7429 .rule = xin->rule,
7430 .wc = (xin->wc
7431 ? xin->wc
f36efd90 7432 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
1520ef4f 7433 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
bb00fdef 7434
790c5d26 7435 .depth = xin->depth,
cdd42eda 7436 .resubmits = xin->resubmits,
bb00fdef 7437 .in_action_set = false,
331c07ac 7438 .in_packet_out = xin->in_packet_out,
f839892a 7439 .pending_encap = false,
88ec1e0a 7440 .pending_decap = false,
1fc11c59 7441 .encap_data = NULL,
7442
7443 .table_id = 0,
7444 .rule_cookie = OVS_BE64_MAX,
7445 .orig_skb_priority = flow->skb_priority,
7446 .sflow_n_outputs = 0,
7447 .sflow_odp_port = 0,
2031ef97 7448 .nf_output_iface = NF_OUT_DROP,
bb00fdef 7449 .exit = false,
fff1b9c0 7450 .error = XLATE_OK,
3d6151f3 7451 .mirrors = 0,
bb00fdef 7452
1d361a81 7453 .freezing = false,
53cc166a 7454 .recirc_update_dp_hash = false,
1d361a81 7455 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
77ab5fd2 7456 .pause = NULL,
bb00fdef 7457
e12ec36b 7458 .was_mpls = false,
07659514 7459 .conntracked = false,
bb00fdef 7460
7461 .ct_nat_action = NULL,
7462
7463 .action_set_has_group = false,
7464 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
7465 };
7466
7467 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
7468 * the packet as the datapath will treat it for output actions. Our
7469 * datapath doesn't retain tunneling information without us re-setting
7470 * it, so clear the tunnel data.
865ca6cf 7471 */
42deb67d 7472
bb00fdef 7473 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
865ca6cf 7474
1520ef4f 7475 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
c0e638aa 7476 xlate_wc_init(&ctx);
bb00fdef 7477
46c88433 7478 COVERAGE_INC(xlate_actions);
9583bc14 7479
7480 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
7481 xbridge->name);
7482 if (xin->frozen_state) {
7483 const struct frozen_state *state = xin->frozen_state;
e672ff9b 7484
7485 struct ovs_list *old_trace = xin->trace;
7486 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
d6bef3cc 7487
e672ff9b 7488 if (xin->ofpacts_len > 0 || ctx.rule) {
7489 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
7490 xin->ofpacts_len ? "actions" : "rule");
fff1b9c0 7491 ctx.error = XLATE_RECIRCULATION_CONFLICT;
1520ef4f 7492 goto exit;
7493 }
7494
7495 /* Set the bridge for post-recirculation processing if needed. */
07a3cd5c 7496 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
2082425c 7497 const struct xbridge *new_bridge
290835f9 7498 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
7499
7500 if (OVS_UNLIKELY(!new_bridge)) {
7501 /* Drop the packet if the bridge cannot be found. */
2d9b49dd 7502 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
fff1b9c0 7503 ctx.error = XLATE_BRIDGE_NOT_FOUND;
2d9b49dd 7504 xin->trace = old_trace;
1520ef4f 7505 goto exit;
7506 }
7507 ctx.xbridge = new_bridge;
7508 /* The bridge is now known so obtain its table version. */
7509 ctx.xin->tables_version
7510 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
7511 }
7512
7513 /* Set the thawed table id. Note: A table lookup is done only if there
7514 * are no frozen actions. */
2082425c 7515 ctx.table_id = state->table_id;
7516 xlate_report(&ctx, OFT_THAW,
7517 "Resuming from table %"PRIu8, ctx.table_id);
e672ff9b 7518
40b0fbd3 7519 ctx.conntracked = state->conntracked;
07659514 7520 if (!state->conntracked) {
72fe7578 7521 clear_conntrack(&ctx);
7522 }
7523
e672ff9b 7524 /* Restore pipeline metadata. May change flow's in_port and other
7525 * metadata to the values that existed when freezing was triggered. */
7526 frozen_metadata_to_flow(&state->metadata, flow);
7527
7528 /* Restore stack, if any. */
2082425c 7529 if (state->stack) {
84cf3c1f 7530 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
7531 }
7532
7533 /* Restore mirror state. */
7534 ctx.mirrors = state->mirrors;
7535
e672ff9b 7536 /* Restore action set, if any. */
2082425c 7537 if (state->action_set_len) {
2d9b49dd 7538 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
417509fa 7539 state->action_set, state->action_set_len);
d6bef3cc 7540
7541 flow->actset_output = OFPP_UNSET;
7542 xlate_write_actions__(&ctx, state->action_set,
7543 state->action_set_len);
7544 }
7545
7546 /* Restore frozen actions. If there are no actions, processing will
7547 * start with a lookup in the table set above. */
7548 xin->ofpacts = state->ofpacts;
7549 xin->ofpacts_len = state->ofpacts_len;
7550 if (state->ofpacts_len) {
2d9b49dd 7551 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
d6bef3cc 7552 xin->ofpacts, xin->ofpacts_len);
e672ff9b 7553 }
e672ff9b 7554
7555 xin->trace = old_trace;
7556 } else if (OVS_UNLIKELY(flow->recirc_id)) {
7557 xlate_report_error(&ctx,
7558 "Recirculation context not found for ID %"PRIx32,
7559 flow->recirc_id);
fff1b9c0 7560 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
1520ef4f 7561 goto exit;
e672ff9b 7562 }
9583bc14 7563
7564 /* Tunnel metadata in udpif format must be normalized before translation. */
7565 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7566 const struct tun_table *tun_tab = ofproto_get_tun_tab(
7567 &ctx.xbridge->ofproto->up);
7568 int err;
7569
7570 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
7571 &xin->upcall_flow->tunnel,
7572 &flow->tunnel);
7573 if (err) {
2d9b49dd 7574 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
7575 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7576 goto exit;
7577 }
254878c1 7578 } else if (!flow->tunnel.metadata.tab || xin->frozen_state) {
7579 /* If the original flow did not come in on a tunnel, then it won't have
7580 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7581 * table in case we generate tunnel actions. */
7582 /* If the translation is from a frozen state, we use the latest
7583 * TLV map to avoid a segmentation fault in case the old TLV map has
7584 * been replaced by a new one.
7585 * XXX: It would be better to abort translation if the table has changed. */
7586 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7587 &ctx.xbridge->ofproto->up);
7588 }
7589 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7590
7591 /* Get the proximate input port of the packet. (If xin->frozen_state,
7592 * flow->in_port is the ultimate input port of the packet.) */
7593 struct xport *in_port = get_ofp_port(xbridge,
7594 ctx.base_flow.in_port.ofp_port);
7595 if (in_port && !in_port->peer) {
7596 ctx.xin->xport_uuid = in_port->uuid;
7597 }
beb75a40 7598
7599 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7600 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
7601 /* Add a dummy Ethernet header to a non-L2 packet if it's coming from
7602 * an L3 port, so that all packets are L2 packets for lookup.
7603 * The dl_type has already been set from the packet_type. */
7604 flow->packet_type = htonl(PT_ETH);
7605 flow->dl_src = eth_addr_zero;
7606 flow->dl_dst = eth_addr_zero;
f839892a 7607 ctx.pending_encap = true;
7608 }
7609
10c44245 7610 if (!xin->ofpacts && !ctx.rule) {
b2e89cc9 7611 ctx.rule = rule_dpif_lookup_from_table(
1f4a8933 7612 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
1e1e1d19 7613 ctx.xin->resubmit_stats, &ctx.table_id,
a027899e 7614 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
10c44245 7615 if (ctx.xin->resubmit_stats) {
16441315 7616 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats, false);
10c44245 7617 }
7618 if (ctx.xin->xcache) {
7619 struct xc_entry *entry;
7620
7621 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
901a517e 7622 entry->rule = ctx.rule;
07a3cd5c 7623 ofproto_rule_ref(&ctx.rule->up);
b256dc52 7624 }
a8c31348 7625
2d9b49dd 7626 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
10c44245 7627 }
10c44245 7628
7629 /* Tunnel stats only for not-thawed packets. */
7630 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
7631 if (ctx.xin->resubmit_stats) {
7632 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7633 if (in_port->bfd) {
7634 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7635 }
7636 }
7637 if (ctx.xin->xcache) {
7638 struct xc_entry *entry;
7639
7640 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
7641 entry->dev.rx = netdev_ref(in_port->netdev);
7642 entry->dev.bfd = bfd_ref(in_port->bfd);
7643 }
7644 }
7645
1d361a81 7646 if (!xin->frozen_state && process_special(&ctx, in_port)) {
7647 /* process_special() did all the processing for this packet.
7648 *
7649 * We do not perform special processing on thawed packets, since that
7650 * was done before they were frozen and should not be redone. */
27a9c0e3 7651 mirror_ingress_packet(&ctx);
7652 } else if (in_port && in_port->xbundle
7653 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
7654 xlate_report_error(&ctx, "dropping packet received on port "
7655 "%s, which is reserved exclusively for mirroring",
7656 in_port->xbundle->name);
bef1403e 7657 } else {
1d361a81 7658 /* Sampling is done on initial reception; don't redo after thawing. */
a6092018 7659 unsigned int user_cookie_offset = 0;
1d361a81 7660 if (!xin->frozen_state) {
7661 user_cookie_offset = compose_sflow_action(&ctx);
7662 compose_ipfix_action(&ctx, ODPP_NONE);
e672ff9b 7663 }
0731abc5 7664 size_t sample_actions_len = ctx.odp_actions->size;
a13a0209 7665 bool ecn_drop = !tnl_process_ecn(flow);
9583bc14 7666
a13a0209 7667 if (!ecn_drop
234c3da9 7668 && (!in_port || may_receive(in_port, &ctx))) {
7669 const struct ofpact *ofpacts;
7670 size_t ofpacts_len;
7671
7672 if (xin->ofpacts) {
7673 ofpacts = xin->ofpacts;
7674 ofpacts_len = xin->ofpacts_len;
7675 } else if (ctx.rule) {
7676 const struct rule_actions *actions
07a3cd5c 7677 = rule_get_actions(&ctx.rule->up);
7678 ofpacts = actions->ofpacts;
7679 ofpacts_len = actions->ofpacts_len;
07a3cd5c 7680 ctx.rule_cookie = ctx.rule->up.flow_cookie;
7681 } else {
7682 OVS_NOT_REACHED();
7683 }
7684
7efbc3b7 7685 mirror_ingress_packet(&ctx);
f5634764 7686 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true, false);
7687 if (ctx.error) {
7688 goto exit;
7689 }
7690
7691 /* We've let OFPP_NORMAL and the learning action look at the
1d361a81 7692 * packet, so cancel all actions and freezing if forwarding is
8a5fb3b4 7693 * disabled. */
7694 if (in_port && (!xport_stp_forward_state(in_port) ||
7695 !xport_rstp_forward_state(in_port))) {
1520ef4f 7696 ctx.odp_actions->size = sample_actions_len;
1d361a81 7697 ctx_cancel_freeze(&ctx);
8a5fb3b4 7698 ofpbuf_clear(&ctx.action_set);
a13a0209 7699 ctx.error = XLATE_FORWARDING_DISABLED;
7700 }
7701
1d361a81 7702 if (!ctx.freezing) {
8a5fb3b4 7703 xlate_action_set(&ctx);
e672ff9b 7704 }
1d361a81 7705 if (ctx.freezing) {
77ab5fd2 7706 finish_freezing(&ctx);
9583bc14 7707 }
7708 } else if (ecn_drop) {
7709 ctx.error = XLATE_CONGESTION_DROP;
7710 }
7711
e672ff9b 7712 /* Output only fully processed packets. */
1d361a81 7713 if (!ctx.freezing
e672ff9b 7714 && xbridge->has_in_band
ce4a6b76
BP
7715 && in_band_must_output_to_local_port(flow)
7716 && !actions_output_to_local_port(&ctx)) {
c9dc050d
AT
7717 WC_MASK_FIELD(ctx.wc, nw_proto);
7718 WC_MASK_FIELD(ctx.wc, tp_src);
7719 WC_MASK_FIELD(ctx.wc, tp_dst);
7720 WC_MASK_FIELD(ctx.wc, dl_type);
b1986b08
BP
7721 xlate_report(&ctx, OFT_DETAIL, "outputting DHCP packet "
7722 "to local port for in-band control");
11938578 7723 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
9583bc14 7724 }
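    /* A hedged sketch of the check that in_band_must_output_to_local_port()
     * is presumed to make (hypothetical reimplementation, not the OVS
     * definition): in-band control depends on DHCP server-to-client
     * replies (UDP source port 67, destination port 68) reaching the
     * local port, which is why exactly dl_type, nw_proto, tp_src, and
     * tp_dst are masked above:
     *
     *     static bool
     *     must_output_dhcp_to_local(const struct flow *flow)
     *     {
     *         return flow->dl_type == htons(ETH_TYPE_IP)
     *             && flow->nw_proto == IPPROTO_UDP
     *             && flow->tp_src == htons(67)
     *             && flow->tp_dst == htons(68);
     *     }
     */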
aaa0fbae 7725
a6092018
BP
7726 if (user_cookie_offset) {
7727 fix_sflow_action(&ctx, user_cookie_offset);
e672ff9b 7728 }
9583bc14
EJ
7729 }
7730
1520ef4f 7731 if (nl_attr_oversized(ctx.odp_actions->size)) {
542024c4 7732 /* These datapath actions are too big for a Netlink attribute, so we
0f032e95
BP
7733 * can't hand them to the kernel directly. dpif_execute() can execute
7734 * them one at a time with userspace assistance, so just mark the result
7735 * as SLOW_ACTION to prevent the flow from being installed. */
7736 COVERAGE_INC(xlate_actions_oversize);
7737 ctx.xout->slow |= SLOW_ACTION;
1520ef4f 7738 } else if (too_many_output_actions(ctx.odp_actions)) {
7d031d7e
BP
7739 COVERAGE_INC(xlate_actions_too_many_output);
7740 ctx.xout->slow |= SLOW_ACTION;
542024c4
BP
7741 }
7742
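    /* Context for the oversize check above, as a hedged sketch: a Netlink
     * attribute's length field is 16 bits, so the encoded actions must fit
     * in UINT16_MAX bytes less the attribute header.  Conceptually
     * (hypothetical helper, not the lib/netlink.c implementation):
     *
     *     static bool
     *     odp_actions_fit_in_nl_attr(size_t payload_size)
     *     {
     *         return payload_size <= UINT16_MAX - NLA_HDRLEN;
     *     }
     */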
64fb5f82
JP
7743 /* Update NetFlow for non-frozen traffic. */
7744 if (xbridge->netflow && !xin->frozen_state) {
1806291d
BP
7745 if (ctx.xin->resubmit_stats) {
7746 netflow_flow_update(xbridge->netflow, flow,
2031ef97 7747 ctx.nf_output_iface,
1806291d
BP
7748 ctx.xin->resubmit_stats);
7749 }
7750 if (ctx.xin->xcache) {
7751 struct xc_entry *entry;
b256dc52 7752
1806291d 7753 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
901a517e
JR
7754 entry->nf.netflow = netflow_ref(xbridge->netflow);
7755 entry->nf.flow = xmemdup(flow, sizeof *flow);
7756 entry->nf.iface = ctx.nf_output_iface;
d6fc5f57
EJ
7757 }
7758 }
7759
8d8ab6c2
JG
7760 /* Translate tunnel metadata masks to udpif format if necessary. */
7761 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7762 if (ctx.wc->masks.tunnel.metadata.present.map) {
7763 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7764 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7765 sizeof(struct geneve_opt)];
7766
7767 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7768 &ctx.wc->masks.tunnel,
7769 upcall_tnl->metadata.opts.gnv,
7770 upcall_tnl->metadata.present.len,
7771 opts);
7772 memset(&ctx.wc->masks.tunnel.metadata, 0,
7773 sizeof ctx.wc->masks.tunnel.metadata);
7774 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7775 upcall_tnl->metadata.present.len);
7776 }
7777 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7778 ctx.wc->masks.tunnel.metadata.tab = NULL;
7779 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7780 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7781 /* If we didn't have options in UDPIF format and didn't have an existing
7782 * metadata table, then it means that there were no options at all when
7783 * we started processing and any wildcards we picked up were from
7784 * action generation. Without options on the incoming packet, wildcards
7785 * aren't meaningful. To avoid them possibly getting misinterpreted,
7786 * just clear everything. */
7787 if (ctx.wc->masks.tunnel.metadata.present.map) {
7788 memset(&ctx.wc->masks.tunnel.metadata, 0,
7789 sizeof ctx.wc->masks.tunnel.metadata);
7790 } else {
7791 ctx.wc->masks.tunnel.metadata.tab = NULL;
7792 }
7793 }
7794
c0e638aa 7795 xlate_wc_finish(&ctx);
1520ef4f
BP
7796
7797exit:
8d8ab6c2
JG
7798 /* Reset the table to what it was when we came in. If we only fetched
7799 * it locally, then it has no meaning outside of flow translation. */
7800 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7801
1520ef4f
BP
7802 ofpbuf_uninit(&ctx.stack);
7803 ofpbuf_uninit(&ctx.action_set);
1d361a81 7804 ofpbuf_uninit(&ctx.frozen_actions);
1520ef4f 7805 ofpbuf_uninit(&scratch_actions);
1fc11c59 7806 ofpbuf_delete(ctx.encap_data);
fff1b9c0
JR
7807
7808 /* Make sure we return a "drop flow" in case of an error. */
7809 if (ctx.error) {
7810 xout->slow = 0;
7811 if (xin->odp_actions) {
7812 ofpbuf_clear(xin->odp_actions);
7813 }
7814 }
a13a0209
AT
7815
7816 /* Install a drop action if the datapath supports explicit drop actions. */
7817 if (xin->odp_actions && !xin->odp_actions->size &&
7818 ovs_explicit_drop_action_supported(ctx.xbridge->ofproto)) {
7819 put_drop_action(xin->odp_actions, ctx.error);
7820 }
7821
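    /* A minimal sketch of the encoding, assuming the drop reason travels
     * as the payload of an OVS_ACTION_ATTR_DROP attribute (hypothetical
     * helper; see the real put_drop_action() for the authoritative
     * encoding):
     *
     *     static void
     *     put_drop_action_sketch(struct ofpbuf *odp_actions,
     *                            enum xlate_error error)
     *     {
     *         nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_DROP, error);
     *     }
     */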
7822 /* A congestion drop and a forwarding-disabled drop are not, strictly
7823 * speaking, translation errors, so reset the translation error before
7824 * returning. */
7825 if (ctx.error == XLATE_CONGESTION_DROP ||
7826 ctx.error == XLATE_FORWARDING_DISABLED) {
7827 ctx.error = XLATE_OK;
7828 }
7829
fff1b9c0 7830 return ctx.error;
91d6cd12
AW
7831}
7832
77ab5fd2
BP
7833enum ofperr
7834xlate_resume(struct ofproto_dpif *ofproto,
7835 const struct ofputil_packet_in_private *pin,
7836 struct ofpbuf *odp_actions,
2f355bff
YHW
7837 enum slow_path_reason *slow,
7838 struct flow *flow,
7839 struct xlate_cache *xcache)
77ab5fd2
BP
7840{
7841 struct dp_packet packet;
4d617a87
BP
7842 dp_packet_use_const(&packet, pin->base.packet,
7843 pin->base.packet_len);
77ab5fd2 7844
07706c50 7845 pkt_metadata_from_flow(&packet.md, &pin->base.flow_metadata.flow);
2f355bff 7846 flow_extract(&packet, flow);
77ab5fd2
BP
7847
7848 struct xlate_in xin;
1f4a8933 7849 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
2f355bff 7850 flow, 0, NULL, ntohs(flow->tcp_flags),
77ab5fd2 7851 &packet, NULL, odp_actions);
2f355bff 7852 xin.xcache = xcache;
77ab5fd2
BP
7853
7854 struct ofpact_note noop;
7855 ofpact_init_NOTE(&noop);
7856 noop.length = 0;
7857
7858 bool any_actions = pin->actions_len > 0;
7859 struct frozen_state state = {
7860 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7861 .ofproto_uuid = pin->bridge,
7862 .stack = pin->stack,
84cf3c1f 7863 .stack_size = pin->stack_size,
77ab5fd2
BP
7864 .mirrors = pin->mirrors,
7865 .conntracked = pin->conntracked,
00135b86 7866 .xport_uuid = UUID_ZERO,
77ab5fd2
BP
7867
7868 /* When there are no actions, xlate_actions() will search the flow
7869 * table. We don't want it to do that (we want it to resume), so
7870 * supply a no-op action if there aren't any.
7871 *
7872 * (We can't necessarily avoid translating actions entirely if there
7873 * aren't any actions, because there might be some finishing-up to do
7874 * at the end of the pipeline, and we don't check for those
7875 * conditions.) */
7876 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7877 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7878
7879 .action_set = pin->action_set,
7880 .action_set_len = pin->action_set_len,
7881 };
7882 frozen_metadata_from_flow(&state.metadata,
4d617a87 7883 &pin->base.flow_metadata.flow);
77ab5fd2
BP
7884 xin.frozen_state = &state;
7885
7886 struct xlate_out xout;
7887 enum xlate_error error = xlate_actions(&xin, &xout);
7888 *slow = xout.slow;
7889 xlate_out_uninit(&xout);
7890
7891 /* xlate_actions() can generate a number of errors, but only
7892 * XLATE_BRIDGE_NOT_FOUND clearly needs to be reported over OpenFlow.
7893 * The others could come up in packet-outs or regular flow
7894 * translation, and reporting them to the controller is unlikely to
7895 * be useful. */
7896 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7897}
7898
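/* Hedged usage sketch (hypothetical caller, not part of this file):
 * resume a translation paused by NXAST_PAUSE and then discard the
 * results.  A real caller would keep 'odp_actions' and 'slow' to
 * execute or install the resumed flow. */
static enum ofperr OVS_UNUSED
resume_and_discard_sketch(struct ofproto_dpif *ofproto,
                          const struct ofputil_packet_in_private *pin)
{
    uint64_t stub[1024 / 8];
    struct ofpbuf odp_actions;
    enum slow_path_reason slow;
    struct flow flow;

    ofpbuf_use_stub(&odp_actions, stub, sizeof stub);
    enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow,
                                     &flow, NULL);
    ofpbuf_uninit(&odp_actions);
    return error;
}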
2eb79142
JG
7899/* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
7900 * supports a notion of an OAM flag, sets it if 'oam' is true.
91d6cd12
AW
7901 * May modify 'packet'.
7902 * Returns 0 if successful, otherwise a positive errno value. */
7903int
2eb79142
JG
7904xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7905 struct dp_packet *packet)
91d6cd12 7906{
84f0f298 7907 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
91d6cd12 7908 struct xport *xport;
2eb79142
JG
7909 uint64_t ofpacts_stub[1024 / 8];
7910 struct ofpbuf ofpacts;
91d6cd12 7911 struct flow flow;
91d6cd12 7912
2eb79142 7913 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
91d6cd12 7914 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
cf62fa4c 7915 flow_extract(packet, &flow);
b5e7e61a 7916 flow.in_port.ofp_port = OFPP_NONE;
91d6cd12 7917
84f0f298 7918 xport = xport_lookup(xcfg, ofport);
91d6cd12 7919 if (!xport) {
02ea2703 7920 return EINVAL;
91d6cd12 7921 }
2eb79142
JG
7922
7923 if (oam) {
71f21279
BP
7924 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7925 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7926 &flag, &flag);
2eb79142
JG
7927 }
7928
7929 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
e491a67a 7930
1f4a8933
JR
7931 /* Actions here are not referring to anything versionable (flow tables or
7932 * groups) so we don't need to worry about the version here. */
7933 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7934 OVS_VERSION_MAX, &flow, NULL,
2eb79142 7935 ofpacts.data, ofpacts.size, packet);
9583bc14 7936}
b256dc52 7937
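/* Hedged usage sketch (hypothetical helper, not part of this file):
 * copy a caller-built frame into a dp_packet and send it out 'ofport'
 * with the tunnel OAM flag set, e.g. for a tunnel liveness probe.  The
 * copy matters because xlate_send_packet() may modify the packet. */
static int OVS_UNUSED
send_oam_probe_sketch(const struct ofport_dpif *ofport,
                      const void *frame, size_t len)
{
    struct dp_packet *packet = dp_packet_clone_data(frame, len);
    int error = xlate_send_packet(ofport, true, packet);
    dp_packet_delete(packet);
    return error;
}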
901a517e 7938void
064799a1
JR
7939xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7940 ofp_port_t in_port, struct eth_addr dl_src,
7941 int vlan, bool is_grat_arp)
b256dc52 7942{
84f0f298 7943 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
b256dc52
JS
7944 struct xbridge *xbridge;
7945 struct xbundle *xbundle;
b256dc52 7946
84f0f298 7947 xbridge = xbridge_lookup(xcfg, ofproto);
b256dc52
JS
7948 if (!xbridge) {
7949 return;
7950 }
7951
2d9b49dd 7952 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
b256dc52
JS
7953 if (!xbundle) {
7954 return;
7955 }
7956
2d9b49dd 7957 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
b256dc52 7958}
bef503e8 7959
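/* Hedged usage sketch (hypothetical values): refresh the MAC learning
 * table from outside a full translation, e.g. when a gratuitous ARP
 * from 'src' is seen arriving on 'in_port' in VLAN 10:
 *
 *     xlate_mac_learning_update(ofproto, in_port, src, 10, true);
 */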
88186383
AZ
7960void
7961xlate_set_support(const struct ofproto_dpif *ofproto,
7962 const struct dpif_backer_support *support)
7963{
7964 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7965 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7966
7967 if (xbridge) {
7968 xbridge->support = *support;
7969 }
7970}