ofproto/ofproto-dpif-xlate.c
1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
14
15 #include <config.h>
16
17 #include "ofproto/ofproto-dpif-xlate.h"
18
19 #include <errno.h>
20 #include <sys/types.h>
21 #include <netinet/in.h>
22 #include <arpa/inet.h>
23 #include <net/if.h>
24 #include <sys/socket.h>
25
26 #include "bfd.h"
27 #include "bitmap.h"
28 #include "bond.h"
29 #include "bundle.h"
30 #include "byte-order.h"
31 #include "cfm.h"
32 #include "connmgr.h"
33 #include "coverage.h"
34 #include "csum.h"
35 #include "dp-packet.h"
36 #include "dpif.h"
37 #include "in-band.h"
38 #include "lacp.h"
39 #include "learn.h"
40 #include "mac-learning.h"
41 #include "mcast-snooping.h"
42 #include "multipath.h"
43 #include "netdev-vport.h"
44 #include "netlink.h"
45 #include "nx-match.h"
46 #include "odp-execute.h"
47 #include "ofproto/ofproto-dpif-ipfix.h"
48 #include "ofproto/ofproto-dpif-mirror.h"
49 #include "ofproto/ofproto-dpif-monitor.h"
50 #include "ofproto/ofproto-dpif-sflow.h"
51 #include "ofproto/ofproto-dpif-trace.h"
52 #include "ofproto/ofproto-dpif-xlate-cache.h"
53 #include "ofproto/ofproto-dpif.h"
54 #include "ofproto/ofproto-provider.h"
55 #include "openvswitch/dynamic-string.h"
56 #include "openvswitch/meta-flow.h"
57 #include "openvswitch/list.h"
58 #include "openvswitch/ofp-actions.h"
59 #include "openvswitch/ofp-ed-props.h"
60 #include "openvswitch/vlog.h"
61 #include "ovs-lldp.h"
62 #include "ovs-router.h"
63 #include "packets.h"
64 #include "tnl-neigh-cache.h"
65 #include "tnl-ports.h"
66 #include "tunnel.h"
67 #include "util.h"
68 #include "uuid.h"
69
70 COVERAGE_DEFINE(xlate_actions);
71 COVERAGE_DEFINE(xlate_actions_oversize);
72 COVERAGE_DEFINE(xlate_actions_too_many_output);
73
74 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
75
76 /* Maximum depth of flow table recursion (due to resubmit actions) in a
77 * flow translation.
78 *
79 * The goal of limiting the depth of resubmits is to ensure that flow
80 * translation eventually terminates. Only resubmits to the same table or an
81 * earlier table count against the maximum depth. This is because resubmits to
82 * strictly monotonically increasing table IDs will eventually terminate, since
83 * any OpenFlow switch has a finite number of tables. OpenFlow tables are most
84 * commonly traversed in numerically increasing order, so this limit has little
85 * effect on conventionally designed OpenFlow pipelines.
86 *
87 * Outputs to patch ports and to groups also count against the depth limit. */
88 #define MAX_DEPTH 64
89
90 /* Maximum number of resubmit actions in a flow translation, whether they are
91 * recursive or not. */
92 #define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
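/* Illustrative sketch, not part of the original source: roughly how the two
 * limits above interact.  'depth' tracks the current nesting of resubmits and
 * falls when a resubmit returns; 'resubmits' counts every resubmit ever made
 * and only grows.  (Both members live in struct xlate_ctx below; the real
 * checks are made during translation later in this file.) */
#if 0   /* example only */
static bool
example_may_resubmit(const struct xlate_ctx *ctx)
{
    return ctx->depth < MAX_DEPTH && ctx->resubmits < MAX_RESUBMITS;
}
#endif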
93
94 /* The structure holds an array of IP addresses assigned to a bridge and the
95 * number of elements in the array. These data are mutable and are evaluated
96 * when ARP or Neighbor Advertisement packets received on a native tunnel
97 * port are xlated. So 'ref_cnt' and RCU are used for synchronization. */
98 struct xbridge_addr {
99 struct in6_addr *addr; /* Array of IP addresses of xbridge. */
100 int n_addr; /* Number of IP addresses. */
101 struct ovs_refcount ref_cnt;
102 };
103
104 struct xbridge {
105 struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
106 struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
107
108 struct ovs_list xbundles; /* Owned xbundles. */
109 struct hmap xports; /* Indexed by ofp_port. */
110
111 char *name; /* Name used in log messages. */
112 struct dpif *dpif; /* Datapath interface. */
113 struct mac_learning *ml; /* Mac learning handle. */
114 struct mcast_snooping *ms; /* Multicast Snooping handle. */
115 struct mbridge *mbridge; /* Mirroring. */
116 struct dpif_sflow *sflow; /* SFlow handle, or null. */
117 struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
118 struct netflow *netflow; /* Netflow handle, or null. */
119 struct stp *stp; /* STP or null if disabled. */
120 struct rstp *rstp; /* RSTP or null if disabled. */
121
122 bool has_in_band; /* Bridge has in band control? */
123 bool forward_bpdu; /* Bridge forwards STP BPDUs? */
124
125 /* Datapath feature support. */
126 struct dpif_backer_support support;
127
128 struct xbridge_addr *addr;
129 };
130
131 struct xbundle {
132 struct hmap_node hmap_node; /* In global 'xbundles' map. */
133 struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
134
135 struct ovs_list list_node; /* In parent 'xbridges' list. */
136 struct xbridge *xbridge; /* Parent xbridge. */
137
138 struct ovs_list xports; /* Contains "struct xport"s. */
139
140 char *name; /* Name used in log messages. */
141 struct bond *bond; /* Nonnull iff more than one port. */
142 struct lacp *lacp; /* LACP handle or null. */
143
144 enum port_vlan_mode vlan_mode; /* VLAN mode. */
145 uint16_t qinq_ethtype; /* Ethertype of dot1q-tunnel interface
146 * either 0x8100 or 0x88a8. */
147 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
148 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
149 * NULL if all VLANs are trunked. */
150 unsigned long *cvlans; /* Bitmap of allowed customer VLANs,
151 * NULL if all VLANs are allowed. */
152 enum port_priority_tags_mode use_priority_tags;
153 /* Use 802.1p tag for frames in VLAN 0? */
154 bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
155 bool protected; /* Protected port mode */
156 };
157
158 struct xport {
159 struct hmap_node hmap_node; /* Node in global 'xports' map. */
160 struct ofport_dpif *ofport; /* Key in global 'xports' map. */
161
162 struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
163 ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
164
165 struct hmap_node uuid_node; /* Node in global 'xports_uuid' map. */
166 struct uuid uuid; /* Key in global 'xports_uuid' map. */
167
168 odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
169
170 struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
171 struct xbundle *xbundle; /* Parent xbundle or null. */
172
173 struct netdev *netdev; /* 'ofport''s netdev. */
174
175 struct xbridge *xbridge; /* Parent bridge. */
176 struct xport *peer; /* Patch port peer or null. */
177
178 enum ofputil_port_config config; /* OpenFlow port configuration. */
179 enum ofputil_port_state state; /* OpenFlow port state. */
180 int stp_port_no; /* STP port number or -1 if not in use. */
181 struct rstp_port *rstp_port; /* RSTP port or null. */
182
183 struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
184
185 bool may_enable; /* May be enabled in bonds. */
186 bool is_tunnel; /* Is a tunnel port. */
187 enum netdev_pt_mode pt_mode; /* packet_type handling. */
188
189 struct cfm *cfm; /* CFM handle or null. */
190 struct bfd *bfd; /* BFD handle or null. */
191 struct lldp *lldp; /* LLDP handle or null. */
192 };
193
194 struct xlate_ctx {
195 struct xlate_in *xin;
196 struct xlate_out *xout;
197
198 struct xlate_cfg *xcfg;
199 const struct xbridge *xbridge;
200
201 /* Flow at the last commit. */
202 struct flow base_flow;
203
204 /* Tunnel IP destination address as received. This is stored separately
205 * as the base_flow.tunnel is cleared on init to reflect the datapath
206 * behavior. Used to make sure not to send tunneled output to ourselves,
207 * which might lead to an infinite loop. This could happen easily
208 * if a tunnel is marked as 'remote_ip=flow', and the flow does not
209 * actually set the tun_dst field. */
210 struct in6_addr orig_tunnel_ipv6_dst;
211
212 /* Stack for the push and pop actions. See comment above nx_stack_push()
213 * in nx-match.c for info on how the stack is stored. */
214 struct ofpbuf stack;
215
216 /* The rule that we are currently translating, or NULL. */
217 struct rule_dpif *rule;
218
219 /* Flow translation populates this with wildcards relevant in translation.
220 * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
221 * null, this is a pointer to a temporary buffer. */
222 struct flow_wildcards *wc;
223
224 /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
225 * this is the same pointer. When 'xin->odp_actions' is null, this points
226 * to a scratch ofpbuf. This allows code to add actions to
227 * 'ctx->odp_actions' without worrying about whether the caller really
228 * wants actions. */
229 struct ofpbuf *odp_actions;
230
231 /* Statistics maintained by xlate_table_action().
232 *
233 * These statistics limit the amount of work that a single flow
234 * translation can perform. The goal of the first of these, 'depth', is
235 * primarily to prevent translation from performing an infinite amount of
236 * work. It counts the current depth of nested "resubmit"s (and a few
237 * other activities); when a resubmit returns, it decreases. Resubmits to
238 * tables in strictly monotonically increasing order don't contribute to
239 * 'depth' because they cannot cause a flow translation to take an infinite
240 * amount of time (because the number of tables is finite). Translation
241 * aborts when 'depth' exceeds MAX_DEPTH.
242 *
243 * 'resubmits', on the other hand, prevents flow translation from
244 * performing an extraordinarily large, though still finite, amount of work.
245 * It counts the total number of resubmits (and a few other activities)
246 * that have been executed. Returning from a resubmit does not affect this
247 * counter. Thus, this limits the amount of work that a particular
248 * translation can perform. Translation aborts when 'resubmits' exceeds
249 * MAX_RESUBMITS (which is much larger than MAX_DEPTH).
250 */
251 int depth; /* Current resubmit nesting depth. */
252 int resubmits; /* Total number of resubmits. */
253 bool in_action_set; /* Currently translating action_set, if true. */
254 bool in_packet_out; /* Currently translating a packet_out msg, if
255 * true. */
256 bool pending_encap; /* True when waiting to commit a pending
257 * encap action. */
258 bool pending_decap; /* True when waiting to commit a pending
259 * decap action. */
260 struct ofpbuf *encap_data; /* May contain a pointer to an ofpbuf with
261 * context for the datapath encap action. */
262
263 uint8_t table_id; /* OpenFlow table ID where flow was found. */
264 ovs_be64 rule_cookie; /* Cookie of the rule being translated. */
265 uint32_t orig_skb_priority; /* Priority when packet arrived. */
266 uint32_t sflow_n_outputs; /* Number of output ports. */
267 odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
268 ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
269 bool exit; /* No further actions should be processed. */
270 mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
271 int mirror_snaplen; /* Max size of a mirror packet in bytes. */
272
273 /* Freezing Translation
274 * ====================
275 *
276 * At some point during translation, the code may recognize the need to halt
277 * and checkpoint the translation in a way that it can be restarted again
278 * later. We call the checkpointing process "freezing" and the restarting
279 * process "thawing".
280 *
281 * The use cases for freezing are:
282 *
283 * - "Recirculation", where the translation process discovers that it
284 * doesn't have enough information to complete translation without
285 * actually executing the actions that have already been translated,
286 * which provides the additional information needed. In these
287 * situations, the translation process freezes and assigns the frozen
288 * data a unique "recirculation ID", which it associates with the data
289 * in a table in userspace (see ofproto-dpif-rid.h). It also adds an
290 * OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
291 * actions. When a packet hits that action, the datapath looks its
292 * flow up again using the ID. If there's a miss, it comes back to
293 * userspace, which finds the recirculation table entry for the ID,
294 * thaws the associated frozen data, and continues translation from
295 * that point given the additional information that is now known.
296 *
297 * The archetypal example is MPLS. As MPLS is implemented in
298 * OpenFlow, the protocol that follows the last MPLS label becomes
299 * known only when that label is popped by an OpenFlow action. That
300 * means that Open vSwitch can't extract the headers beyond the MPLS
301 * labels until the pop action is executed. Thus, at that point
302 * translation uses the recirculation process to extract the headers
303 * beyond the MPLS labels.
304 *
305 * (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
306 * output to bonds. OVS pre-populates all the datapath flows for bond
307 * output in the datapath, though, which means that the elaborate
308 * process of coming back to userspace for a second round of
309 * translation isn't needed, and so bonds don't follow the above
310 * process.)
311 *
312 * - "Continuation". A continuation is a way for an OpenFlow controller
313 * to interpose on a packet's traversal of the OpenFlow tables. When
314 * the translation process encounters a "controller" action with the
315 * "pause" flag, it freezes translation, serializes the frozen data,
316 * and sends it to an OpenFlow controller. The controller then
317 * examines and possibly modifies the frozen data and eventually sends
318 * it back to the switch, which thaws it and continues translation.
319 *
320 * The main problem of freezing translation is preserving state, so that
321 * when the translation is thawed later it resumes from where it left off,
322 * without disruption. In particular, actions must be preserved as follows:
323 *
324 * - If we're freezing because an action needed more information, the
325 * action that prompted it.
326 *
327 * - Any actions remaining to be translated within the current flow.
328 *
329 * - If translation was frozen within a NXAST_RESUBMIT, then any actions
330 * following the resubmit action. Resubmit actions can be nested, so
331 * this has to go all the way up the control stack.
332 *
333 * - The OpenFlow 1.1+ action set.
334 *
335 * State that actions and flow table lookups can depend on, such as the
336 * following, must also be preserved:
337 *
338 * - Metadata fields (input port, registers, OF1.1+ metadata, ...).
339 *
340 * - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
341 *
342 * - The table ID and cookie of the flow being translated at each level
343 * of the control stack, because these can become visible through
344 * OFPAT_CONTROLLER actions (and other ways).
345 *
346 * Translation allows for the control of this state preservation via these
347 * members. When a need to freeze translation is identified, the
348 * translation process:
349 *
350 * 1. Sets 'freezing' to true.
351 *
352 * 2. Sets 'exit' to true to tell later steps that we're exiting from the
353 * translation process.
354 *
355 * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
356 * frozen_actions.header to the action to make it easy to find it later.
357 * This action holds the current table ID and cookie so that they can be
358 * restored during a post-recirculation upcall translation.
359 *
360 * 4. Adds the action that prompted recirculation and any actions following
361 * it within the same flow to 'frozen_actions', so that they can be
362 * executed during a post-recirculation upcall translation.
363 *
364 * 5. Returns.
365 *
366 * 6. The action that prompted recirculation might be nested in a stack of
367 * nested "resubmit"s that have actions remaining. Each of these notices
368 * that we're exiting and freezing and responds by adding more
369 * OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
370 * followed by any actions that were yet unprocessed.
371 *
372 * If we're freezing because of recirculation, the caller generates a
373 * recirculation ID and associates all the state produced by this process
374 * with it. For post-recirculation upcall translation, the caller passes it
375 * back in for the new translation to execute. The process yielded a set of
376 * ofpacts that can be translated directly, so it is not much of a special
377 * case at that point.
378 */
379 bool freezing;
380 bool recirc_update_dp_hash; /* Generated recirculation will be preceded
381 * by datapath HASH action to get an updated
382 * dp_hash after recirculation. */
383 uint32_t dp_hash_alg;
384 uint32_t dp_hash_basis;
385 struct ofpbuf frozen_actions;
386 const struct ofpact_controller *pause;
387
388 /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
389 * This is a trigger for recirculation in cases where translating an action
390 * or looking up a flow requires access to the fields of the packet after
391 * the MPLS label stack that was originally present. */
392 bool was_mpls;
393
394 /* True if conntrack has been performed on this packet during processing
395 * on the current bridge. This is used to determine whether conntrack
396 * state from the datapath should be honored after thawing. */
397 bool conntracked;
398
399 /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
400 struct ofpact_nat *ct_nat_action;
401
402 /* OpenFlow 1.1+ action set.
403 *
404 * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
405 * When translation is otherwise complete, ofpacts_execute_action_set()
406 * converts it to a set of "struct ofpact"s that can be translated into
407 * datapath actions. */
408 bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
409 struct ofpbuf action_set; /* Action set. */
410
411 enum xlate_error error; /* Translation failed. */
412 };
413
414 /* Structure to track VLAN manipulation */
415 struct xvlan_single {
416 uint16_t tpid;
417 uint16_t vid;
418 uint16_t pcp;
419 };
420
421 struct xvlan {
422 struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
423 };
424
425 const char *xlate_strerror(enum xlate_error error)
426 {
427 switch (error) {
428 case XLATE_OK:
429 return "OK";
430 case XLATE_BRIDGE_NOT_FOUND:
431 return "Bridge not found";
432 case XLATE_RECURSION_TOO_DEEP:
433 return "Recursion too deep";
434 case XLATE_TOO_MANY_RESUBMITS:
435 return "Too many resubmits";
436 case XLATE_STACK_TOO_DEEP:
437 return "Stack too deep";
438 case XLATE_NO_RECIRCULATION_CONTEXT:
439 return "No recirculation context";
440 case XLATE_RECIRCULATION_CONFLICT:
441 return "Recirculation conflict";
442 case XLATE_TOO_MANY_MPLS_LABELS:
443 return "Too many MPLS labels";
444 case XLATE_INVALID_TUNNEL_METADATA:
445 return "Invalid tunnel metadata";
446 case XLATE_UNSUPPORTED_PACKET_TYPE:
447 return "Unsupported packet type";
448 }
449 return "Unknown error";
450 }
451
452 static void xlate_action_set(struct xlate_ctx *ctx);
453 static void xlate_commit_actions(struct xlate_ctx *ctx);
454
455 static void
456 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
457 struct xport *out_dev);
458
459 static void
460 ctx_trigger_freeze(struct xlate_ctx *ctx)
461 {
462 ctx->exit = true;
463 ctx->freezing = true;
464 }
465
466 static void
467 ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
468 uint32_t basis)
469 {
470 ctx->exit = true;
471 ctx->freezing = true;
472 ctx->recirc_update_dp_hash = true;
473 ctx->dp_hash_alg = type;
474 ctx->dp_hash_basis = basis;
475 }
476
477 static bool
478 ctx_first_frozen_action(const struct xlate_ctx *ctx)
479 {
480 return !ctx->frozen_actions.size;
481 }
482
483 static void
484 ctx_cancel_freeze(struct xlate_ctx *ctx)
485 {
486 if (ctx->freezing) {
487 ctx->freezing = false;
488 ctx->recirc_update_dp_hash = false;
489 ofpbuf_clear(&ctx->frozen_actions);
490 ctx->frozen_actions.header = NULL;
491 ctx->pause = NULL;
492 }
493 }
494
495 static void finish_freezing(struct xlate_ctx *ctx);
496
497 /* A controller may use OFPP_NONE as the ingress port to indicate that
498 * it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
499 * when an input bundle is needed for validation (e.g., mirroring or
500 * OFPP_NORMAL processing). It is not connected to an 'ofproto', nor does
501 * it have any 'port' structs, so care must be taken when dealing with it. */
502 static struct xbundle ofpp_none_bundle = {
503 .name = "OFPP_NONE",
504 .vlan_mode = PORT_VLAN_TRUNK
505 };
506
507 /* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
508 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
509 * traffic egressing the 'ofport' with that priority should be marked with. */
510 struct skb_priority_to_dscp {
511 struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
512 uint32_t skb_priority; /* Priority of this queue (see struct flow). */
513
514 uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
515 };
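/* Illustrative sketch, not part of the original source: how output code can
 * consult this map, using dscp_from_skb_priority(), which is declared further
 * down in this file. */
#if 0   /* example only */
    uint8_t dscp;
    if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
        /* Rewrite the DSCP bits of the packet's ToS byte to 'dscp' before
         * output. */
    }
#endif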
516
517 /* Xlate config contains hash maps of all bridges, bundles and ports.
518 * Xcfgp contains the pointer to the current xlate configuration.
519 * When the main thread needs to change the configuration, it copies xcfgp to
520 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
521 * does not block handler and revalidator threads. */
522 struct xlate_cfg {
523 struct hmap xbridges;
524 struct hmap xbundles;
525 struct hmap xports;
526 struct hmap xports_uuid;
527 };
528 static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
529 static struct xlate_cfg *new_xcfg = NULL;
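/* Illustrative sketch, not part of the original source: the copy-edit-commit
 * pattern described above.  xlate_txn_start() copies the configuration in
 * xcfgp into new_xcfg, the xlate_*_set()/xlate_*_remove() functions edit the
 * copy, and xlate_txn_commit() publishes it via RCU (see the "sample
 * workflow" comment above xlate_txn_commit() later in this file). */
#if 0   /* example only */
    xlate_txn_start();
    xlate_ofproto_set(ofproto, name, dpif, ml, stp, rstp, ms, mbridge,
                      sflow, ipfix, netflow, forward_bpdu, has_in_band,
                      support);
    xlate_txn_commit();
#endif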
530
531 typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
532 struct xlate_ctx *, bool, bool);
533 static bool may_receive(const struct xport *, struct xlate_ctx *);
534 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
535 struct xlate_ctx *, bool, bool);
536 static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
537 struct xlate_ctx *, bool, bool);
538 static void xlate_normal(struct xlate_ctx *);
539 static void xlate_normal_flood(struct xlate_ctx *ct,
540 struct xbundle *in_xbundle, struct xvlan *);
541 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
542 uint8_t table_id, bool may_packet_in,
543 bool honor_table_miss, bool with_ct_orig,
544 bool is_last_action, xlate_actions_handler *);
545
546 static bool input_vid_is_valid(const struct xlate_ctx *,
547 uint16_t vid, struct xbundle *);
548 static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
549 static void xvlan_pop(struct xvlan *src);
550 static void xvlan_push_uninit(struct xvlan *src);
551 static void xvlan_extract(const struct flow *, struct xvlan *);
552 static void xvlan_put(struct flow *, const struct xvlan *,
553 enum port_priority_tags_mode);
554 static void xvlan_input_translate(const struct xbundle *,
555 const struct xvlan *in,
556 struct xvlan *xvlan);
557 static void xvlan_output_translate(const struct xbundle *,
558 const struct xvlan *xvlan,
559 struct xvlan *out);
560 static void output_normal(struct xlate_ctx *, const struct xbundle *,
561 const struct xvlan *);
562
563 /* Optional bond recirculation parameter to compose_output_action(). */
564 struct xlate_bond_recirc {
565 uint32_t recirc_id; /* !0 Use recirculation instead of output. */
566 uint8_t hash_alg; /* !0 Compute hash for recirc before. */
567 uint32_t hash_basis; /* Compute hash for recirc before. */
568 };
569
570 static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
571 const struct xlate_bond_recirc *xr,
572 bool is_last_action, bool truncate);
573
574 static struct xbridge *xbridge_lookup(struct xlate_cfg *,
575 const struct ofproto_dpif *);
576 static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
577 const struct uuid *);
578 static struct xbundle *xbundle_lookup(struct xlate_cfg *,
579 const struct ofbundle *);
580 static struct xport *xport_lookup(struct xlate_cfg *,
581 const struct ofport_dpif *);
582 static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
583 const struct uuid *);
584 static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
585 static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
586 uint32_t skb_priority);
587 static void clear_skb_priorities(struct xport *);
588 static size_t count_skb_priorities(const struct xport *);
589 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
590 uint8_t *dscp);
591
592 static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
593 static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
594 static void xlate_xport_init(struct xlate_cfg *, struct xport *);
595 static void xlate_xbridge_set(struct xbridge *, struct dpif *,
596 const struct mac_learning *, struct stp *,
597 struct rstp *, const struct mcast_snooping *,
598 const struct mbridge *,
599 const struct dpif_sflow *,
600 const struct dpif_ipfix *,
601 const struct netflow *,
602 bool forward_bpdu, bool has_in_band,
603 const struct dpif_backer_support *,
604 const struct xbridge_addr *);
605 static void xlate_xbundle_set(struct xbundle *xbundle,
606 enum port_vlan_mode vlan_mode,
607 uint16_t qinq_ethtype, int vlan,
608 unsigned long *trunks, unsigned long *cvlans,
609 enum port_priority_tags_mode,
610 const struct bond *bond, const struct lacp *lacp,
611 bool floodable, bool protected);
612 static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
613 const struct netdev *netdev, const struct cfm *cfm,
614 const struct bfd *bfd, const struct lldp *lldp,
615 int stp_port_no, const struct rstp_port *rstp_port,
616 enum ofputil_port_config config,
617 enum ofputil_port_state state, bool is_tunnel,
618 bool may_enable);
619 static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
620 static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
621 static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
622 static void xlate_xbridge_copy(struct xbridge *);
623 static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
624 static void xlate_xport_copy(struct xbridge *, struct xbundle *,
625 struct xport *);
626 static void xlate_xcfg_free(struct xlate_cfg *);
627 \f
628 /* Tracing helpers. */
629
630 /* If tracing is enabled in 'ctx', creates a new trace node and appends it to
631 * the list of nodes maintained in ctx->xin. The new node has type 'type' and
632 * its text is created from 'format' by treating it as a printf format string.
633 * Returns the list of nodes embedded within the new trace node; ordinarily,
634 * the caller can ignore this, but it is useful if the caller needs to nest
635 * more trace nodes within the new node.
636 *
637 * If tracing is not enabled, does nothing and returns NULL. */
638 static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
639 xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
640 const char *format, ...)
641 {
642 struct ovs_list *subtrace = NULL;
643 if (OVS_UNLIKELY(ctx->xin->trace)) {
644 va_list args;
645 va_start(args, format);
646 char *text = xvasprintf(format, args);
647 subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
648 va_end(args);
649 free(text);
650 }
651 return subtrace;
652 }
653
654 /* This is like xlate_report() for errors that are serious enough that we
655 * should log them even if we are not tracing. */
656 static void OVS_PRINTF_FORMAT(2, 3)
657 xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
658 {
659 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
660 if (!OVS_UNLIKELY(ctx->xin->trace)
661 && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
662 return;
663 }
664
665 struct ds s = DS_EMPTY_INITIALIZER;
666 va_list args;
667 va_start(args, format);
668 ds_put_format_valist(&s, format, args);
669 va_end(args);
670
671 if (ctx->xin->trace) {
672 oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
673 } else {
674 ds_put_format(&s, " on bridge %s while processing ",
675 ctx->xbridge->name);
676 flow_format(&s, &ctx->base_flow, NULL);
677 VLOG_WARN("%s", ds_cstr(&s));
678 }
679 ds_destroy(&s);
680 }
681
682 /* This is like xlate_report() for messages that should be logged
683 * at the info level (even when not tracing). */
684 static void OVS_PRINTF_FORMAT(2, 3)
685 xlate_report_info(const struct xlate_ctx *ctx, const char *format, ...)
686 {
687 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
688 if (!OVS_UNLIKELY(ctx->xin->trace)
689 && (!ctx->xin->packet || VLOG_DROP_INFO(&rl))) {
690 return;
691 }
692
693 struct ds s = DS_EMPTY_INITIALIZER;
694 va_list args;
695 va_start(args, format);
696 ds_put_format_valist(&s, format, args);
697 va_end(args);
698
699 if (ctx->xin->trace) {
700 oftrace_report(ctx->xin->trace, OFT_WARN, ds_cstr(&s));
701 } else {
702 ds_put_format(&s, " on bridge %s while processing ",
703 ctx->xbridge->name);
704 flow_format(&s, &ctx->base_flow, NULL);
705 VLOG_INFO("%s", ds_cstr(&s));
706 }
707 ds_destroy(&s);
708 }
709
710 /* This is like xlate_report() for messages that should be logged at debug
711 * level (even if we are not tracing) because they can be valuable for
712 * debugging. */
713 static void OVS_PRINTF_FORMAT(3, 4)
714 xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
715 const char *format, ...)
716 {
717 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
718 if (!OVS_UNLIKELY(ctx->xin->trace)
719 && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
720 return;
721 }
722
723 struct ds s = DS_EMPTY_INITIALIZER;
724 va_list args;
725 va_start(args, format);
726 ds_put_format_valist(&s, format, args);
727 va_end(args);
728
729 if (ctx->xin->trace) {
730 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
731 } else {
732 VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
733 }
734 ds_destroy(&s);
735 }
736
737 /* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
738 * trace, whose text is 'title' followed by a formatted version of the
739 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
740 *
741 * If tracing is not enabled, does nothing. */
742 static void
743 xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
744 const char *title,
745 const struct ofpact *ofpacts, size_t ofpacts_len)
746 {
747 if (OVS_UNLIKELY(ctx->xin->trace)) {
748 struct ds s = DS_EMPTY_INITIALIZER;
749 ds_put_format(&s, "%s: ", title);
750 struct ofpact_format_params fp = { .s = &s };
751 ofpacts_format(ofpacts, ofpacts_len, &fp);
752 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
753 ds_destroy(&s);
754 }
755 }
756
757 /* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
758 * trace, whose text is a formatted version of the OpenFlow action set.
759 * 'verb' should be "was" or "is", depending on whether the action set reported
760 * is the new action set or the old one.
761 *
762 * If tracing is not enabled, does nothing. */
763 static void
764 xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
765 {
766 if (OVS_UNLIKELY(ctx->xin->trace)) {
767 struct ofpbuf action_list;
768 ofpbuf_init(&action_list, 0);
769 ofpacts_execute_action_set(&action_list, &ctx->action_set);
770 if (action_list.size) {
771 struct ds s = DS_EMPTY_INITIALIZER;
772 struct ofpact_format_params fp = { .s = &s };
773 ofpacts_format(action_list.data, action_list.size, &fp);
774 xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
775 verb, ds_cstr(&s));
776 ds_destroy(&s);
777 } else {
778 xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
779 }
780 ofpbuf_uninit(&action_list);
781 }
782 }
783
784
785 /* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
786 * OpenFlow table 'table_id') to the trace and makes this node the parent for
787 * future trace nodes. The caller should save ctx->xin->trace before calling
788 * this function, then after tracing all of the activities under the table,
789 * restore its previous value.
790 *
791 * If tracing is not enabled, does nothing. */
792 static void
793 xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
794 uint8_t table_id)
795 {
796 if (OVS_LIKELY(!ctx->xin->trace)) {
797 return;
798 }
799
800 struct ds s = DS_EMPTY_INITIALIZER;
801 ds_put_format(&s, "%2d. ", table_id);
802 if (rule == ctx->xin->ofproto->miss_rule) {
803 ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
804 } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
805 ds_put_cstr(&s, "No match.");
806 } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
807 ds_put_cstr(&s, "Packets are IP fragments and "
808 "the fragment handling mode is \"drop\".");
809 } else {
810 minimatch_format(&rule->up.cr.match,
811 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
812 NULL, &s, OFP_DEFAULT_PRIORITY);
813 if (ds_last(&s) != ' ') {
814 ds_put_cstr(&s, ", ");
815 }
816 ds_put_format(&s, "priority %d", rule->up.cr.priority);
817 if (rule->up.flow_cookie) {
818 ds_put_format(&s, ", cookie %#"PRIx64,
819 ntohll(rule->up.flow_cookie));
820 }
821 }
822 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
823 ds_cstr(&s))->subs;
824 ds_destroy(&s);
825 }
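/* Illustrative sketch, not part of the original source: the save/restore
 * discipline that the comment above asks of xlate_report_table() callers. */
#if 0   /* example only */
    struct ovs_list *old_trace = ctx->xin->trace;
    xlate_report_table(ctx, rule, table_id);
    /* ... translate the rule's actions; any further reports attach under the
     * new table node ... */
    ctx->xin->trace = old_trace;
#endif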
826
827 /* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
828 * reporting the value of subfield 'sf'.
829 *
830 * If tracing is not enabled, does nothing. */
831 static void
832 xlate_report_subfield(const struct xlate_ctx *ctx,
833 const struct mf_subfield *sf)
834 {
835 if (OVS_UNLIKELY(ctx->xin->trace)) {
836 struct ds s = DS_EMPTY_INITIALIZER;
837 mf_format_subfield(sf, &s);
838 ds_put_cstr(&s, " is now ");
839
840 if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
841 union mf_value value;
842 mf_get_value(sf->field, &ctx->xin->flow, &value);
843 mf_format(sf->field, &value, NULL, NULL, &s);
844 } else {
845 union mf_subvalue cst;
846 mf_read_subfield(sf, &ctx->xin->flow, &cst);
847 ds_put_hex(&s, &cst, sizeof cst);
848 }
849
850 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
851
852 ds_destroy(&s);
853 }
854 }
855 \f
856 static void
857 xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
858 {
859 ovs_list_init(&xbridge->xbundles);
860 hmap_init(&xbridge->xports);
861 hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
862 hash_pointer(xbridge->ofproto, 0));
863 }
864
865 static void
866 xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
867 {
868 ovs_list_init(&xbundle->xports);
869 ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
870 hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
871 hash_pointer(xbundle->ofbundle, 0));
872 }
873
874 static void
875 xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
876 {
877 hmap_init(&xport->skb_priorities);
878 hmap_insert(&xcfg->xports, &xport->hmap_node,
879 hash_pointer(xport->ofport, 0));
880 hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
881 hash_ofp_port(xport->ofp_port));
882 hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
883 uuid_hash(&xport->uuid));
884 }
885
886 static struct xbridge_addr *
887 xbridge_addr_create(struct xbridge *xbridge)
888 {
889 struct xbridge_addr *xbridge_addr = xbridge->addr;
890 struct in6_addr *addr = NULL, *mask = NULL;
891 struct netdev *dev;
892 int err, n_addr = 0;
893
894 err = netdev_open(xbridge->name, NULL, &dev);
895 if (!err) {
896 err = netdev_get_addr_list(dev, &addr, &mask, &n_addr);
897 if (!err) {
898 if (!xbridge->addr ||
899 n_addr != xbridge->addr->n_addr ||
900 (xbridge->addr->addr && memcmp(addr, xbridge->addr->addr,
901 sizeof(*addr) * n_addr))) {
902 xbridge_addr = xzalloc(sizeof *xbridge_addr);
903 xbridge_addr->addr = addr;
904 xbridge_addr->n_addr = n_addr;
905 ovs_refcount_init(&xbridge_addr->ref_cnt);
906 } else {
907 free(addr);
908 }
909 free(mask);
910 }
911 netdev_close(dev);
912 }
913
914 return xbridge_addr;
915 }
916
917 static struct xbridge_addr *
918 xbridge_addr_ref(const struct xbridge_addr *addr_)
919 {
920 struct xbridge_addr *addr = CONST_CAST(struct xbridge_addr *, addr_);
921 if (addr) {
922 ovs_refcount_ref(&addr->ref_cnt);
923 }
924 return addr;
925 }
926
927 static void
928 xbridge_addr_unref(struct xbridge_addr *addr)
929 {
930 if (addr && ovs_refcount_unref_relaxed(&addr->ref_cnt) == 1) {
931 free(addr->addr);
932 free(addr);
933 }
934 }
935
936 static void
937 xlate_xbridge_set(struct xbridge *xbridge,
938 struct dpif *dpif,
939 const struct mac_learning *ml, struct stp *stp,
940 struct rstp *rstp, const struct mcast_snooping *ms,
941 const struct mbridge *mbridge,
942 const struct dpif_sflow *sflow,
943 const struct dpif_ipfix *ipfix,
944 const struct netflow *netflow,
945 bool forward_bpdu, bool has_in_band,
946 const struct dpif_backer_support *support,
947 const struct xbridge_addr *addr)
948 {
949 if (xbridge->ml != ml) {
950 mac_learning_unref(xbridge->ml);
951 xbridge->ml = mac_learning_ref(ml);
952 }
953
954 if (xbridge->ms != ms) {
955 mcast_snooping_unref(xbridge->ms);
956 xbridge->ms = mcast_snooping_ref(ms);
957 }
958
959 if (xbridge->mbridge != mbridge) {
960 mbridge_unref(xbridge->mbridge);
961 xbridge->mbridge = mbridge_ref(mbridge);
962 }
963
964 if (xbridge->sflow != sflow) {
965 dpif_sflow_unref(xbridge->sflow);
966 xbridge->sflow = dpif_sflow_ref(sflow);
967 }
968
969 if (xbridge->ipfix != ipfix) {
970 dpif_ipfix_unref(xbridge->ipfix);
971 xbridge->ipfix = dpif_ipfix_ref(ipfix);
972 }
973
974 if (xbridge->stp != stp) {
975 stp_unref(xbridge->stp);
976 xbridge->stp = stp_ref(stp);
977 }
978
979 if (xbridge->rstp != rstp) {
980 rstp_unref(xbridge->rstp);
981 xbridge->rstp = rstp_ref(rstp);
982 }
983
984 if (xbridge->netflow != netflow) {
985 netflow_unref(xbridge->netflow);
986 xbridge->netflow = netflow_ref(netflow);
987 }
988
989 if (xbridge->addr != addr) {
990 xbridge_addr_unref(xbridge->addr);
991 xbridge->addr = xbridge_addr_ref(addr);
992 }
993
994 xbridge->dpif = dpif;
995 xbridge->forward_bpdu = forward_bpdu;
996 xbridge->has_in_band = has_in_band;
997 xbridge->support = *support;
998 }
999
1000 static void
1001 xlate_xbundle_set(struct xbundle *xbundle,
1002 enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
1003 int vlan, unsigned long *trunks, unsigned long *cvlans,
1004 enum port_priority_tags_mode use_priority_tags,
1005 const struct bond *bond, const struct lacp *lacp,
1006 bool floodable, bool protected)
1007 {
1008 ovs_assert(xbundle->xbridge);
1009
1010 xbundle->vlan_mode = vlan_mode;
1011 xbundle->qinq_ethtype = qinq_ethtype;
1012 xbundle->vlan = vlan;
1013 xbundle->trunks = trunks;
1014 xbundle->cvlans = cvlans;
1015 xbundle->use_priority_tags = use_priority_tags;
1016 xbundle->floodable = floodable;
1017 xbundle->protected = protected;
1018
1019 if (xbundle->bond != bond) {
1020 bond_unref(xbundle->bond);
1021 xbundle->bond = bond_ref(bond);
1022 }
1023
1024 if (xbundle->lacp != lacp) {
1025 lacp_unref(xbundle->lacp);
1026 xbundle->lacp = lacp_ref(lacp);
1027 }
1028 }
1029
1030 static void
1031 xlate_xport_set(struct xport *xport, odp_port_t odp_port,
1032 const struct netdev *netdev, const struct cfm *cfm,
1033 const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
1034 const struct rstp_port* rstp_port,
1035 enum ofputil_port_config config, enum ofputil_port_state state,
1036 bool is_tunnel, bool may_enable)
1037 {
1038 xport->config = config;
1039 xport->state = state;
1040 xport->stp_port_no = stp_port_no;
1041 xport->is_tunnel = is_tunnel;
1042 xport->pt_mode = netdev_get_pt_mode(netdev);
1043 xport->may_enable = may_enable;
1044 xport->odp_port = odp_port;
1045
1046 if (xport->rstp_port != rstp_port) {
1047 rstp_port_unref(xport->rstp_port);
1048 xport->rstp_port = rstp_port_ref(rstp_port);
1049 }
1050
1051 if (xport->cfm != cfm) {
1052 cfm_unref(xport->cfm);
1053 xport->cfm = cfm_ref(cfm);
1054 }
1055
1056 if (xport->bfd != bfd) {
1057 bfd_unref(xport->bfd);
1058 xport->bfd = bfd_ref(bfd);
1059 }
1060
1061 if (xport->lldp != lldp) {
1062 lldp_unref(xport->lldp);
1063 xport->lldp = lldp_ref(lldp);
1064 }
1065
1066 if (xport->netdev != netdev) {
1067 netdev_close(xport->netdev);
1068 xport->netdev = netdev_ref(netdev);
1069 }
1070 }
1071
1072 static void
1073 xlate_xbridge_copy(struct xbridge *xbridge)
1074 {
1075 struct xbundle *xbundle;
1076 struct xport *xport;
1077 struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
1078 new_xbridge->ofproto = xbridge->ofproto;
1079 new_xbridge->name = xstrdup(xbridge->name);
1080 xlate_xbridge_init(new_xcfg, new_xbridge);
1081
1082 xlate_xbridge_set(new_xbridge,
1083 xbridge->dpif, xbridge->ml, xbridge->stp,
1084 xbridge->rstp, xbridge->ms, xbridge->mbridge,
1085 xbridge->sflow, xbridge->ipfix, xbridge->netflow,
1086 xbridge->forward_bpdu, xbridge->has_in_band,
1087 &xbridge->support, xbridge->addr);
1088 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
1089 xlate_xbundle_copy(new_xbridge, xbundle);
1090 }
1091
1092 /* Copy xports that are not part of an xbundle. */
1093 HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
1094 if (!xport->xbundle) {
1095 xlate_xport_copy(new_xbridge, NULL, xport);
1096 }
1097 }
1098 }
1099
1100 static void
1101 xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
1102 {
1103 struct xport *xport;
1104 struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
1105 new_xbundle->ofbundle = xbundle->ofbundle;
1106 new_xbundle->xbridge = xbridge;
1107 new_xbundle->name = xstrdup(xbundle->name);
1108 xlate_xbundle_init(new_xcfg, new_xbundle);
1109
1110 xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
1111 xbundle->vlan, xbundle->trunks, xbundle->cvlans,
1112 xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
1113 xbundle->floodable, xbundle->protected);
1114 LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
1115 xlate_xport_copy(xbridge, new_xbundle, xport);
1116 }
1117 }
1118
1119 static void
1120 xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
1121 struct xport *xport)
1122 {
1123 struct skb_priority_to_dscp *pdscp, *new_pdscp;
1124 struct xport *new_xport = xzalloc(sizeof *xport);
1125 new_xport->ofport = xport->ofport;
1126 new_xport->ofp_port = xport->ofp_port;
1127 new_xport->xbridge = xbridge;
1128 new_xport->uuid = xport->uuid;
1129 xlate_xport_init(new_xcfg, new_xport);
1130
1131 xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
1132 xport->bfd, xport->lldp, xport->stp_port_no,
1133 xport->rstp_port, xport->config, xport->state,
1134 xport->is_tunnel, xport->may_enable);
1135
1136 if (xport->peer) {
1137 struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
1138 if (peer) {
1139 new_xport->peer = peer;
1140 new_xport->peer->peer = new_xport;
1141 }
1142 }
1143
1144 if (xbundle) {
1145 new_xport->xbundle = xbundle;
1146 ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
1147 }
1148
1149 HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
1150 new_pdscp = xmalloc(sizeof *pdscp);
1151 new_pdscp->skb_priority = pdscp->skb_priority;
1152 new_pdscp->dscp = pdscp->dscp;
1153 hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
1154 hash_int(new_pdscp->skb_priority, 0));
1155 }
1156 }
1157
1158 /* Sets the current xlate configuration to new_xcfg and frees the old xlate
1159 * configuration in xcfgp.
1160 *
1161 * This needs to be called after editing the xlate configuration.
1162 *
1163 * Functions that edit the new xlate configuration are
1164 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
1165 *
1166 * A sample workflow:
1167 *
1168 * xlate_txn_start();
1169 * ...
1170 * edit_xlate_configuration();
1171 * ...
1172 * xlate_txn_commit(); */
1173 void
1174 xlate_txn_commit(void)
1175 {
1176 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1177
1178 ovsrcu_set(&xcfgp, new_xcfg);
1179 ovsrcu_synchronize();
1180 xlate_xcfg_free(xcfg);
1181 new_xcfg = NULL;
1182 }
1183
1184 /* Copies the current xlate configuration in xcfgp to new_xcfg.
1185 *
1186 * This needs to be called prior to editing the xlate configuration. */
1187 void
1188 xlate_txn_start(void)
1189 {
1190 struct xbridge *xbridge;
1191 struct xlate_cfg *xcfg;
1192
1193 ovs_assert(!new_xcfg);
1194
1195 new_xcfg = xmalloc(sizeof *new_xcfg);
1196 hmap_init(&new_xcfg->xbridges);
1197 hmap_init(&new_xcfg->xbundles);
1198 hmap_init(&new_xcfg->xports);
1199 hmap_init(&new_xcfg->xports_uuid);
1200
1201 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1202 if (!xcfg) {
1203 return;
1204 }
1205
1206 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1207 xlate_xbridge_copy(xbridge);
1208 }
1209 }
1210
1211
1212 static void
1213 xlate_xcfg_free(struct xlate_cfg *xcfg)
1214 {
1215 struct xbridge *xbridge, *next_xbridge;
1216
1217 if (!xcfg) {
1218 return;
1219 }
1220
1221 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1222 xlate_xbridge_remove(xcfg, xbridge);
1223 }
1224
1225 hmap_destroy(&xcfg->xbridges);
1226 hmap_destroy(&xcfg->xbundles);
1227 hmap_destroy(&xcfg->xports);
1228 hmap_destroy(&xcfg->xports_uuid);
1229 free(xcfg);
1230 }
1231
1232 void
1233 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
1234 struct dpif *dpif,
1235 const struct mac_learning *ml, struct stp *stp,
1236 struct rstp *rstp, const struct mcast_snooping *ms,
1237 const struct mbridge *mbridge,
1238 const struct dpif_sflow *sflow,
1239 const struct dpif_ipfix *ipfix,
1240 const struct netflow *netflow,
1241 bool forward_bpdu, bool has_in_band,
1242 const struct dpif_backer_support *support)
1243 {
1244 struct xbridge *xbridge;
1245 struct xbridge_addr *xbridge_addr, *old_addr;
1246
1247 ovs_assert(new_xcfg);
1248
1249 xbridge = xbridge_lookup(new_xcfg, ofproto);
1250 if (!xbridge) {
1251 xbridge = xzalloc(sizeof *xbridge);
1252 xbridge->ofproto = ofproto;
1253
1254 xlate_xbridge_init(new_xcfg, xbridge);
1255 }
1256
1257 free(xbridge->name);
1258 xbridge->name = xstrdup(name);
1259
1260 xbridge_addr = xbridge_addr_create(xbridge);
1261 old_addr = xbridge->addr;
1262
1263 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
1264 netflow, forward_bpdu, has_in_band, support,
1265 xbridge_addr);
1266
1267 if (xbridge_addr != old_addr) {
1268 xbridge_addr_unref(xbridge_addr);
1269 }
1270 }
1271
1272 static void
1273 xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
1274 {
1275 struct xbundle *xbundle, *next_xbundle;
1276 struct xport *xport, *next_xport;
1277
1278 if (!xbridge) {
1279 return;
1280 }
1281
1282 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
1283 xlate_xport_remove(xcfg, xport);
1284 }
1285
1286 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
1287 xlate_xbundle_remove(xcfg, xbundle);
1288 }
1289
1290 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
1291 mac_learning_unref(xbridge->ml);
1292 mcast_snooping_unref(xbridge->ms);
1293 mbridge_unref(xbridge->mbridge);
1294 dpif_sflow_unref(xbridge->sflow);
1295 dpif_ipfix_unref(xbridge->ipfix);
1296 netflow_unref(xbridge->netflow);
1297 stp_unref(xbridge->stp);
1298 rstp_unref(xbridge->rstp);
1299 xbridge_addr_unref(xbridge->addr);
1300 hmap_destroy(&xbridge->xports);
1301 free(xbridge->name);
1302 free(xbridge);
1303 }
1304
1305 void
1306 xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1307 {
1308 struct xbridge *xbridge;
1309
1310 ovs_assert(new_xcfg);
1311
1312 xbridge = xbridge_lookup(new_xcfg, ofproto);
1313 xlate_xbridge_remove(new_xcfg, xbridge);
1314 }
1315
1316 void
1317 xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1318 const char *name, enum port_vlan_mode vlan_mode,
1319 uint16_t qinq_ethtype, int vlan,
1320 unsigned long *trunks, unsigned long *cvlans,
1321 enum port_priority_tags_mode use_priority_tags,
1322 const struct bond *bond, const struct lacp *lacp,
1323 bool floodable, bool protected)
1324 {
1325 struct xbundle *xbundle;
1326
1327 ovs_assert(new_xcfg);
1328
1329 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1330 if (!xbundle) {
1331 xbundle = xzalloc(sizeof *xbundle);
1332 xbundle->ofbundle = ofbundle;
1333 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
1334
1335 xlate_xbundle_init(new_xcfg, xbundle);
1336 }
1337
1338 free(xbundle->name);
1339 xbundle->name = xstrdup(name);
1340
1341 xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
1342 use_priority_tags, bond, lacp, floodable, protected);
1343 }
1344
1345 static void
1346 xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
1347 {
1348 struct xport *xport;
1349
1350 if (!xbundle) {
1351 return;
1352 }
1353
1354 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
1355 xport->xbundle = NULL;
1356 }
1357
1358 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
1359 ovs_list_remove(&xbundle->list_node);
1360 bond_unref(xbundle->bond);
1361 lacp_unref(xbundle->lacp);
1362 free(xbundle->name);
1363 free(xbundle);
1364 }
1365
1366 void
1367 xlate_bundle_remove(struct ofbundle *ofbundle)
1368 {
1369 struct xbundle *xbundle;
1370
1371 ovs_assert(new_xcfg);
1372
1373 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1374 xlate_xbundle_remove(new_xcfg, xbundle);
1375 }
1376
1377 void
1378 xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1379 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1380 odp_port_t odp_port, const struct netdev *netdev,
1381 const struct cfm *cfm, const struct bfd *bfd,
1382 const struct lldp *lldp, struct ofport_dpif *peer,
1383 int stp_port_no, const struct rstp_port *rstp_port,
1384 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
1385 enum ofputil_port_config config,
1386 enum ofputil_port_state state, bool is_tunnel,
1387 bool may_enable)
1388 {
1389 size_t i;
1390 struct xport *xport;
1391
1392 ovs_assert(new_xcfg);
1393
1394 xport = xport_lookup(new_xcfg, ofport);
1395 if (!xport) {
1396 xport = xzalloc(sizeof *xport);
1397 xport->ofport = ofport;
1398 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
1399 xport->ofp_port = ofp_port;
1400 uuid_generate(&xport->uuid);
1401
1402 xlate_xport_init(new_xcfg, xport);
1403 }
1404
1405 ovs_assert(xport->ofp_port == ofp_port);
1406
1407 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1408 stp_port_no, rstp_port, config, state, is_tunnel,
1409 may_enable);
1410
1411 if (xport->peer) {
1412 xport->peer->peer = NULL;
1413 }
1414 xport->peer = xport_lookup(new_xcfg, peer);
1415 if (xport->peer) {
1416 xport->peer->peer = xport;
1417 }
1418
1419 if (xport->xbundle) {
1420 ovs_list_remove(&xport->bundle_node);
1421 }
1422 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
1423 if (xport->xbundle) {
1424 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
1425 }
1426
1427 clear_skb_priorities(xport);
1428 for (i = 0; i < n_qdscp; i++) {
1429 struct skb_priority_to_dscp *pdscp;
1430 uint32_t skb_priority;
1431
1432 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1433 &skb_priority)) {
1434 continue;
1435 }
1436
1437 pdscp = xmalloc(sizeof *pdscp);
1438 pdscp->skb_priority = skb_priority;
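/* Store the 6-bit DSCP value shifted into its position in the ToS byte
 * (bits 2-7), masked by IP_DSCP_MASK. */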
1439 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1440 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1441 hash_int(pdscp->skb_priority, 0));
1442 }
1443 }
1444
1445 static void
1446 xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
1447 {
1448 if (!xport) {
1449 return;
1450 }
1451
1452 if (xport->peer) {
1453 xport->peer->peer = NULL;
1454 xport->peer = NULL;
1455 }
1456
1457 if (xport->xbundle) {
1458 ovs_list_remove(&xport->bundle_node);
1459 }
1460
1461 clear_skb_priorities(xport);
1462 hmap_destroy(&xport->skb_priorities);
1463
1464 hmap_remove(&xcfg->xports, &xport->hmap_node);
1465 hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
1466 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1467
1468 netdev_close(xport->netdev);
1469 rstp_port_unref(xport->rstp_port);
1470 cfm_unref(xport->cfm);
1471 bfd_unref(xport->bfd);
1472 lldp_unref(xport->lldp);
1473 free(xport);
1474 }
1475
1476 void
1477 xlate_ofport_remove(struct ofport_dpif *ofport)
1478 {
1479 struct xport *xport;
1480
1481 ovs_assert(new_xcfg);
1482
1483 xport = xport_lookup(new_xcfg, ofport);
1484 xlate_xport_remove(new_xcfg, xport);
1485 }
1486
1487 static struct ofproto_dpif *
1488 xlate_lookup_ofproto_(const struct dpif_backer *backer,
1489 const struct flow *flow,
1490 ofp_port_t *ofp_in_port, const struct xport **xportp,
1491 char **errorp)
1492 {
1493 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1494 const struct xport *xport;
1495
1496 /* If packet is recirculated, xport can be retrieved from frozen state. */
1497 if (flow->recirc_id) {
1498 const struct recirc_id_node *recirc_id_node;
1499
1500 recirc_id_node = recirc_id_node_find(flow->recirc_id);
1501
1502 if (OVS_UNLIKELY(!recirc_id_node)) {
1503 if (errorp) {
1504 *errorp = xasprintf("no recirculation data for recirc_id "
1505 "%"PRIu32, flow->recirc_id);
1506 }
1507 return NULL;
1508 }
1509
1510 /* If recirculation was initiated due to a bond (in_port = OFPP_NONE)
1511 * then frozen state is static and xport_uuid is not defined, so xport
1512 * cannot be restored from frozen state. */
1513 if (recirc_id_node->state.metadata.in_port != OFPP_NONE) {
1514 struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
1515 xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
1516 if (xport && xport->xbridge && xport->xbridge->ofproto) {
1517 goto out;
1518 }
1519 }
1520 }
1521
1522 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1523 ? tnl_port_receive(flow)
1524 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1525 if (OVS_UNLIKELY(!xport)) {
1526 if (errorp) {
1527 *errorp = (tnl_port_should_receive(flow)
1528 ? xstrdup("no OpenFlow tunnel port for this packet")
1529 : xasprintf("no OpenFlow port for datapath "
1530 "port %"PRIu32, flow->in_port.odp_port));
1531 }
1532 return NULL;
1533 }
1534
1535 out:
1536 if (errorp) {
1537 *errorp = NULL;
1538 }
1539 *xportp = xport;
1540 if (ofp_in_port) {
1541 *ofp_in_port = xport->ofp_port;
1542 }
1543 return xport->xbridge->ofproto;
1544 }
1545
1546 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1547 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1548 struct ofproto_dpif *
1549 xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1550 ofp_port_t *ofp_in_port, char **errorp)
1551 {
1552 const struct xport *xport;
1553
1554 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, errorp);
1555 }
1556
1557 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1558 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
1559 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
1560 * handles for those protocols if they're enabled. The caller may use the
1561 * returned pointers until quiescing; for longer-term use, additional
1562 * references must be taken.
1563 *
1564 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
1565 */
1566 int
1567 xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1568 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1569 struct dpif_sflow **sflow, struct netflow **netflow,
1570 ofp_port_t *ofp_in_port)
1571 {
1572 struct ofproto_dpif *ofproto;
1573 const struct xport *xport;
1574
1575 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, NULL);
1576
1577 if (!ofproto) {
1578 return ENODEV;
1579 }
1580
1581 if (ofprotop) {
1582 *ofprotop = ofproto;
1583 }
1584
1585 if (ipfix) {
1586 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1587 }
1588
1589 if (sflow) {
1590 *sflow = xport ? xport->xbridge->sflow : NULL;
1591 }
1592
1593 if (netflow) {
1594 *netflow = xport ? xport->xbridge->netflow : NULL;
1595 }
1596
1597 return 0;
1598 }
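/* Illustrative sketch, not part of the original source: a typical lookup
 * that only needs the ofproto and OpenFlow in_port, passing NULL for the
 * handles it does not care about. */
#if 0   /* example only */
    struct ofproto_dpif *ofproto;
    ofp_port_t ofp_in_port;

    if (!xlate_lookup(backer, flow, &ofproto, NULL, NULL, NULL,
                      &ofp_in_port)) {
        /* 'ofproto' and 'ofp_in_port' may be used until quiescing; take
         * references for longer-term use. */
    }
#endif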
1599
1600 static struct xbridge *
1601 xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
1602 {
1603 struct hmap *xbridges;
1604 struct xbridge *xbridge;
1605
1606 if (!ofproto || !xcfg) {
1607 return NULL;
1608 }
1609
1610 xbridges = &xcfg->xbridges;
1611
1612 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
1613 xbridges) {
1614 if (xbridge->ofproto == ofproto) {
1615 return xbridge;
1616 }
1617 }
1618 return NULL;
1619 }
1620
1621 static struct xbridge *
1622 xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1623 {
1624 struct xbridge *xbridge;
1625
1626 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1627 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
1628 return xbridge;
1629 }
1630 }
1631 return NULL;
1632 }
1633
1634 static struct xbundle *
1635 xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
1636 {
1637 struct hmap *xbundles;
1638 struct xbundle *xbundle;
1639
1640 if (!ofbundle || !xcfg) {
1641 return NULL;
1642 }
1643
1644 xbundles = &xcfg->xbundles;
1645
1646 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
1647 xbundles) {
1648 if (xbundle->ofbundle == ofbundle) {
1649 return xbundle;
1650 }
1651 }
1652 return NULL;
1653 }
1654
1655 static struct xport *
1656 xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
1657 {
1658 struct hmap *xports;
1659 struct xport *xport;
1660
1661 if (!ofport || !xcfg) {
1662 return NULL;
1663 }
1664
1665 xports = &xcfg->xports;
1666
1667 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
1668 xports) {
1669 if (xport->ofport == ofport) {
1670 return xport;
1671 }
1672 }
1673 return NULL;
1674 }
1675
1676 static struct xport *
1677 xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1678 {
1679 struct hmap *xports;
1680 struct xport *xport;
1681
1682 if (uuid_is_zero(uuid) || !xcfg) {
1683 return NULL;
1684 }
1685
1686 xports = &xcfg->xports_uuid;
1687
1688 HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
1689 if (uuid_equals(&xport->uuid, uuid)) {
1690 return xport;
1691 }
1692 }
1693 return NULL;
1694 }
1695
1696 static struct stp_port *
1697 xport_get_stp_port(const struct xport *xport)
1698 {
1699 return xport->xbridge->stp && xport->stp_port_no != -1
1700 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1701 : NULL;
1702 }
1703
1704 static bool
1705 xport_stp_learn_state(const struct xport *xport)
1706 {
1707 struct stp_port *sp = xport_get_stp_port(xport);
1708 return sp
1709 ? stp_learn_in_state(stp_port_get_state(sp))
1710 : true;
1711 }
1712
1713 static bool
1714 xport_stp_forward_state(const struct xport *xport)
1715 {
1716 struct stp_port *sp = xport_get_stp_port(xport);
1717 return sp
1718 ? stp_forward_in_state(stp_port_get_state(sp))
1719 : true;
1720 }
1721
1722 static bool
1723 xport_stp_should_forward_bpdu(const struct xport *xport)
1724 {
1725 struct stp_port *sp = xport_get_stp_port(xport);
1726 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
1727 }
1728
1729 /* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1730  * were used to make the determination. */
1731 static bool
1732 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1733 {
1734 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
1735 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1736 return is_stp(flow);
1737 }
1738
1739 static void
1740 stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1741 {
1742 struct stp_port *sp = xport_get_stp_port(xport);
1743 struct dp_packet payload = *packet;
1744 struct eth_header *eth = dp_packet_data(&payload);
1745
1746 /* Sink packets on ports that have STP disabled when the bridge has
1747 * STP enabled. */
1748 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1749 return;
1750 }
1751
1752 /* Trim off padding on payload. */
1753 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1754 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1755 }
1756
1757 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1758 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
1759 }
1760 }
1761
1762 static enum rstp_state
1763 xport_get_rstp_port_state(const struct xport *xport)
1764 {
1765 return xport->rstp_port
1766 ? rstp_port_get_state(xport->rstp_port)
1767 : RSTP_DISABLED;
1768 }
1769
1770 static bool
1771 xport_rstp_learn_state(const struct xport *xport)
1772 {
1773 return xport->xbridge->rstp && xport->rstp_port
1774 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1775 : true;
1776 }
1777
1778 static bool
1779 xport_rstp_forward_state(const struct xport *xport)
1780 {
1781 return xport->xbridge->rstp && xport->rstp_port
1782 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1783 : true;
1784 }
1785
1786 static bool
1787 xport_rstp_should_manage_bpdu(const struct xport *xport)
1788 {
1789 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
1790 }
1791
1792 static void
1793 rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1794 {
1795 struct dp_packet payload = *packet;
1796 struct eth_header *eth = dp_packet_data(&payload);
1797
1798 /* Sink packets on ports that have no RSTP. */
1799 if (!xport->rstp_port) {
1800 return;
1801 }
1802
1803 /* Trim off padding on payload. */
1804 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1805 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1806 }
1807
1808 int len = ETH_HEADER_LEN + LLC_HEADER_LEN;
1809 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1810 len += VLAN_HEADER_LEN;
1811 }
1812 if (dp_packet_try_pull(&payload, len)) {
1813 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1814 dp_packet_size(&payload));
1815 }
1816 }
1817
1818 static struct xport *
1819 get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1820 {
1821 struct xport *xport;
1822
1823 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1824 &xbridge->xports) {
1825 if (xport->ofp_port == ofp_port) {
1826 return xport;
1827 }
1828 }
1829 return NULL;
1830 }
1831
1832 static odp_port_t
1833 ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1834 {
1835 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1836 return xport ? xport->odp_port : ODPP_NONE;
1837 }
1838
1839 static bool
1840 odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1841 {
1842 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1843 return xport && xport->may_enable;
1844 }
1845
1846 static struct ofputil_bucket *
1847 group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1848 int depth);
1849
1850 static bool
1851 group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1852 {
1853 struct group_dpif *group;
1854
1855 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1856 ctx->xin->tables_version, false);
1857 if (group) {
1858 return group_first_live_bucket(ctx, group, depth) != NULL;
1859 }
1860
1861 return false;
1862 }
1863
1864 #define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1865
1866 static bool
1867 bucket_is_alive(const struct xlate_ctx *ctx,
1868 struct ofputil_bucket *bucket, int depth)
1869 {
1870 if (depth >= MAX_LIVENESS_RECURSION) {
1871 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1872 MAX_LIVENESS_RECURSION);
1873 return false;
1874 }
1875
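    /* A bucket is live if it specifies no liveness requirement at all, if its
     * watch port is up, or (recursively) if its watch group has at least one
     * live bucket of its own. */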
1876 return (!ofputil_bucket_has_liveness(bucket)
1877 || (bucket->watch_port != OFPP_ANY
1878 && odp_port_is_alive(ctx, bucket->watch_port))
1879 || (bucket->watch_group != OFPG_ANY
1880 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
1881 }
1882
1883 static void
1884 xlate_report_bucket_not_live(const struct xlate_ctx *ctx,
1885 const struct ofputil_bucket *bucket)
1886 {
1887 if (OVS_UNLIKELY(ctx->xin->trace)) {
1888 struct ds s = DS_EMPTY_INITIALIZER;
1889 if (bucket->watch_port != OFPP_ANY) {
1890 ds_put_cstr(&s, "port ");
1891 ofputil_format_port(bucket->watch_port, NULL, &s);
1892 }
1893 if (bucket->watch_group != OFPG_ANY) {
1894 if (s.length) {
1895 ds_put_cstr(&s, " and ");
1896 }
1897             ds_put_format(&s, "group %"PRIu32, bucket->watch_group);
1898 }
1899
1900 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": not live due to %s",
1901 bucket->bucket_id, ds_cstr(&s));
1902
1903 ds_destroy(&s);
1904 }
1905 }
1906
1907 static struct ofputil_bucket *
1908 group_first_live_bucket(const struct xlate_ctx *ctx,
1909 const struct group_dpif *group, int depth)
1910 {
1911 struct ofputil_bucket *bucket;
1912 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1913 if (bucket_is_alive(ctx, bucket, depth)) {
1914 return bucket;
1915 }
1916 xlate_report_bucket_not_live(ctx, bucket);
1917 }
1918
1919 return NULL;
1920 }
1921
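/* Returns the live bucket in 'group' with the highest score, where a bucket's
 * score is a 16-bit hash of its bucket ID (seeded with 'basis') multiplied by
 * the bucket's weight.  For a given 'basis' the choice is deterministic;
 * across many values of 'basis' buckets with larger weights win more often,
 * yielding a roughly weight-proportional distribution.  Returns NULL if no
 * bucket is live. */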
1922 static struct ofputil_bucket *
1923 group_best_live_bucket(const struct xlate_ctx *ctx,
1924 const struct group_dpif *group,
1925 uint32_t basis)
1926 {
1927 struct ofputil_bucket *best_bucket = NULL;
1928 uint32_t best_score = 0;
1929
1930 struct ofputil_bucket *bucket;
1931 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1932 if (bucket_is_alive(ctx, bucket, 0)) {
1933 uint32_t score =
1934 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
1935 if (score >= best_score) {
1936 best_bucket = bucket;
1937 best_score = score;
1938 }
1939 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": score %"PRIu32,
1940 bucket->bucket_id, score);
1941 } else {
1942 xlate_report_bucket_not_live(ctx, bucket);
1943 }
1944 }
1945
1946 return best_bucket;
1947 }
1948
1949 static bool
1950 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
1951 {
1952 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1953 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1954 }
1955
1956 static bool
1957 xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1958 {
1959 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1960 }
1961
1962 static bool
1963 xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
1964 {
1965 switch (xbundle->vlan_mode) {
1966 case PORT_VLAN_ACCESS:
1967 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
1968
1969 case PORT_VLAN_TRUNK:
1970 case PORT_VLAN_NATIVE_UNTAGGED:
1971 case PORT_VLAN_NATIVE_TAGGED:
1972 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
1973
1974 case PORT_VLAN_DOT1Q_TUNNEL:
1975 return xvlan->v[0].vid == xbundle->vlan &&
1976 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
1977
1978 default:
1979 OVS_NOT_REACHED();
1980 }
1981 }
1982
1983 static mirror_mask_t
1984 xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1985 {
1986 return xbundle != &ofpp_none_bundle
1987 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
1988 : 0;
1989 }
1990
1991 static mirror_mask_t
1992 xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
1993 {
1994 return xbundle != &ofpp_none_bundle
1995 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
1996 : 0;
1997 }
1998
1999 static mirror_mask_t
2000 xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
2001 {
2002 return xbundle != &ofpp_none_bundle
2003 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
2004 : 0;
2005 }
2006
2007 static struct xbundle *
2008 lookup_input_bundle__(const struct xbridge *xbridge,
2009 ofp_port_t in_port, struct xport **in_xportp)
2010 {
2011 struct xport *xport;
2012
2013 /* Find the port and bundle for the received packet. */
2014 xport = get_ofp_port(xbridge, in_port);
2015 if (in_xportp) {
2016 *in_xportp = xport;
2017 }
2018 if (xport && xport->xbundle) {
2019 return xport->xbundle;
2020 }
2021
2022 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
2023 * which a controller may use as the ingress port for traffic that
2024 * it is sourcing. */
2025 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
2026 return &ofpp_none_bundle;
2027 }
2028 return NULL;
2029 }
2030
2031 static struct xbundle *
2032 lookup_input_bundle(const struct xlate_ctx *ctx,
2033 ofp_port_t in_port, struct xport **in_xportp)
2034 {
2035 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
2036 in_port, in_xportp);
2037 if (!xbundle) {
2038 /* Odd. A few possible reasons here:
2039 *
2040 * - We deleted a port but there are still a few packets queued up
2041 * from it.
2042 *
2043 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
2044 * we don't know about.
2045 *
2046 * - The ofproto client didn't configure the port as part of a bundle.
2047 * This is particularly likely to happen if a packet was received on
2048 * the port after it was created, but before the client had a chance
2049 * to configure its bundle.
2050 */
2051 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2052 in_port);
2053 }
2054 return xbundle;
2055 }
2056
2057 /* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
2058 * given the packet is ingressing or egressing on 'xbundle', which has ingress
2059 * or egress (as appropriate) mirrors 'mirrors'. */
2060 static void
2061 mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
2062 mirror_mask_t mirrors)
2063 {
2064 struct xvlan in_xvlan;
2065 struct xvlan xvlan;
2066
2067 /* Figure out what VLAN the packet is in (because mirrors can select
2068      * packets on the basis of VLAN). */
2069 xvlan_extract(&ctx->xin->flow, &in_xvlan);
2070 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
2071 return;
2072 }
2073 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
2074
2075 const struct xbridge *xbridge = ctx->xbridge;
2076
2077 /* Don't mirror to destinations that we've already mirrored to. */
2078 mirrors &= ~ctx->mirrors;
2079 if (!mirrors) {
2080 return;
2081 }
2082
2083 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate through
2084 * the candidates, adding the ones that really should be mirrored to
2085 * 'used_mirrors', as long as some candidates remain. */
2086 mirror_mask_t used_mirrors = 0;
2087 while (mirrors) {
2088 const unsigned long *vlans;
2089 mirror_mask_t dup_mirrors;
2090 struct ofbundle *out;
2091 int out_vlan;
2092 int snaplen;
2093
2094 /* Get the details of the mirror represented by the rightmost 1-bit. */
2095 ovs_assert(mirror_get(xbridge->mbridge, raw_ctz(mirrors),
2096 &vlans, &dup_mirrors,
2097 &out, &snaplen, &out_vlan));
2098
2100 /* If this mirror selects on the basis of VLAN, and it does not select
2101 * 'vlan', then discard this mirror and go on to the next one. */
2102 if (vlans) {
2103 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
2104 }
2105 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
2106 mirrors = zero_rightmost_1bit(mirrors);
2107 continue;
2108 }
2109
2110 /* We sent a packet to this mirror. */
2111 used_mirrors |= rightmost_1bit(mirrors);
2112
2113 /* Record the mirror, and the mirrors that output to the same
2114 * destination, so that we don't mirror to them again. This must be
2115 * done now to ensure that output_normal(), below, doesn't recursively
2116 * output to the same mirrors. */
2117 ctx->mirrors |= dup_mirrors;
2118 ctx->mirror_snaplen = snaplen;
2119
2120 /* Send the packet to the mirror. */
2121 if (out) {
2122 struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
2123 if (out_xbundle) {
2124 output_normal(ctx, out_xbundle, &xvlan);
2125 }
2126 } else if (xvlan.v[0].vid != out_vlan
2127 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
2128 struct xbundle *xb;
2129 uint16_t old_vid = xvlan.v[0].vid;
2130
2131 xvlan.v[0].vid = out_vlan;
2132 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
2133 if (xbundle_includes_vlan(xb, &xvlan)
2134 && !xbundle_mirror_out(xbridge, xb)) {
2135 output_normal(ctx, xb, &xvlan);
2136 }
2137 }
2138 xvlan.v[0].vid = old_vid;
2139 }
2140
2141 /* output_normal() could have recursively output (to different
2142 * mirrors), so make sure that we don't send duplicates. */
2143 mirrors &= ~ctx->mirrors;
2144 ctx->mirror_snaplen = 0;
2145 }
2146
2147 if (used_mirrors) {
2148 if (ctx->xin->resubmit_stats) {
2149 mirror_update_stats(xbridge->mbridge, used_mirrors,
2150 ctx->xin->resubmit_stats->n_packets,
2151 ctx->xin->resubmit_stats->n_bytes);
2152 }
2153 if (ctx->xin->xcache) {
2154 struct xc_entry *entry;
2155
2156 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
2157 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
2158 entry->mirror.mirrors = used_mirrors;
2159 }
2160 }
2161 }
2162
2163 static void
2164 mirror_ingress_packet(struct xlate_ctx *ctx)
2165 {
2166 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
2167 struct xbundle *xbundle = lookup_input_bundle(
2168 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
2169 if (xbundle) {
2170 mirror_packet(ctx, xbundle,
2171 xbundle_mirror_src(ctx->xbridge, xbundle));
2172 }
2173 }
2174 }
2175
2176 /* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2177 * If so, returns true. Otherwise, returns false.
2178 *
2179 * 'vid' should be the VID obtained from the 802.1Q header that was received as
2180 * part of a packet (specify 0 if there was no 802.1Q header), in the range
2181 * 0...4095. */
2182 static bool
2183 input_vid_is_valid(const struct xlate_ctx *ctx,
2184 uint16_t vid, struct xbundle *in_xbundle)
2185 {
2186 /* Allow any VID on the OFPP_NONE port. */
2187 if (in_xbundle == &ofpp_none_bundle) {
2188 return true;
2189 }
2190
2191 switch (in_xbundle->vlan_mode) {
2192 case PORT_VLAN_ACCESS:
2193 if (vid) {
2194 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
2195 "packet received on port %s configured as VLAN "
2196 "%d access port", vid, in_xbundle->name,
2197 in_xbundle->vlan);
2198 return false;
2199 }
2200 return true;
2201
2202 case PORT_VLAN_NATIVE_UNTAGGED:
2203 case PORT_VLAN_NATIVE_TAGGED:
2204 if (!vid) {
2205 /* Port must always carry its native VLAN. */
2206 return true;
2207 }
2208 /* Fall through. */
2209 case PORT_VLAN_TRUNK:
2210 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
2211 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
2212 "received on port %s not configured for "
2213 "trunking VLAN %"PRIu16,
2214 vid, in_xbundle->name, vid);
2215 return false;
2216 }
2217 return true;
2218
2219 case PORT_VLAN_DOT1Q_TUNNEL:
2220 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
2221 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
2222 "on dot1q-tunnel port %s that excludes this "
2223 "VLAN", vid, in_xbundle->name);
2224 return false;
2225 }
2226 return true;
2227
2228 default:
2229 OVS_NOT_REACHED();
2230 }
2231
2232 }
2233
2234 static void
2235 xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2236 {
2237 *dst = *src;
2238 }
2239
2240 static void
2241 xvlan_pop(struct xvlan *src)
2242 {
2243 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2244 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2245 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2246 }
2247
2248 static void
2249 xvlan_push_uninit(struct xvlan *src)
2250 {
2251 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2252 memset(&src->v[0], 0, sizeof(src->v[0]));
2253 }
2254
2255 /* Extract VLAN information (headers) from flow */
2256 static void
2257 xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2258 {
2259 int i;
2260 memset(xvlan, 0, sizeof(*xvlan));
2261 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2262 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2263 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2264 break;
2265 }
2266 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2267 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2268 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2269 }
2270 }
2271
2272 /* Put VLAN information (headers) to flow */
2273 static void
2274 xvlan_put(struct flow *flow, const struct xvlan *xvlan,
2275 enum port_priority_tags_mode use_priority_tags)
2276 {
2277 ovs_be16 tci;
2278 int i;
2279 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2280 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
2281 if (tci || ((use_priority_tags == PORT_PRIORITY_TAGS_ALWAYS) &&
2282 xvlan->v[i].tpid)) {
2283 tci |= htons(VLAN_CFI);
2284 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2285 htons(xvlan->v[i].tpid) :
2286 htons(ETH_TYPE_VLAN_8021Q);
2287 }
2288 flow->vlans[i].tci = tci;
2289 }
2290 }
2291
2292 /* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2293 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2294 * returns the VLANs of the packet during bridge internal processing. */
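/* For example (a sketch, not an exhaustive list of cases): an untagged packet
 * arriving on an access port with VLAN 10 is assigned xvlan->v[0] =
 * { tpid 0x8100, vid 10, pcp 0 }, while a dot1q-tunnel port pushes an outer
 * tag carrying the bundle's qinq_ethtype and VLAN on top of whatever tags the
 * packet already had. */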
2295 static void
2296 xvlan_input_translate(const struct xbundle *in_xbundle,
2297 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2298 {
2299
2300 switch (in_xbundle->vlan_mode) {
2301 case PORT_VLAN_ACCESS:
2302 memset(xvlan, 0, sizeof(*xvlan));
2303 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2304 ETH_TYPE_VLAN_8021Q;
2305 xvlan->v[0].vid = in_xbundle->vlan;
2306 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2307 break;
2308
2309 case PORT_VLAN_TRUNK:
2310 xvlan_copy(xvlan, in_xvlan);
2311 break;
2312
2313 case PORT_VLAN_NATIVE_UNTAGGED:
2314 case PORT_VLAN_NATIVE_TAGGED:
2315 xvlan_copy(xvlan, in_xvlan);
2316 if (!in_xvlan->v[0].vid) {
2317 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2318 ETH_TYPE_VLAN_8021Q;
2319 xvlan->v[0].vid = in_xbundle->vlan;
2320 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2321 }
2322 break;
2323
2324 case PORT_VLAN_DOT1Q_TUNNEL:
2325 xvlan_copy(xvlan, in_xvlan);
2326 xvlan_push_uninit(xvlan);
2327 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2328 xvlan->v[0].vid = in_xbundle->vlan;
2329 xvlan->v[0].pcp = 0;
2330 break;
2331
2332 default:
2333 OVS_NOT_REACHED();
2334 }
2335 }
2336
2337 /* Given 'xvlan', the VLANs of a packet during internal processing, and
2338 * 'out_xbundle', a bundle on which the packet is to be output, returns the
2339 * VLANs that should be included in output packet. */
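/* For example: an access port strips every tag, a native-untagged port pops
 * the outermost tag only when it matches the port's native VLAN, and a
 * dot1q-tunnel port pops the outer (service) tag that was pushed on input. */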
2340 static void
2341 xvlan_output_translate(const struct xbundle *out_xbundle,
2342 const struct xvlan *xvlan, struct xvlan *out_xvlan)
2343 {
2344 switch (out_xbundle->vlan_mode) {
2345 case PORT_VLAN_ACCESS:
2346 memset(out_xvlan, 0, sizeof(*out_xvlan));
2347 break;
2348
2349 case PORT_VLAN_TRUNK:
2350 case PORT_VLAN_NATIVE_TAGGED:
2351 xvlan_copy(out_xvlan, xvlan);
2352 break;
2353
2354 case PORT_VLAN_NATIVE_UNTAGGED:
2355 xvlan_copy(out_xvlan, xvlan);
2356 if (xvlan->v[0].vid == out_xbundle->vlan) {
2357 xvlan_pop(out_xvlan);
2358 }
2359 break;
2360
2361 case PORT_VLAN_DOT1Q_TUNNEL:
2362 xvlan_copy(out_xvlan, xvlan);
2363 xvlan_pop(out_xvlan);
2364 break;
2365
2366 default:
2367 OVS_NOT_REACHED();
2368 }
2369 }
2370
2371 /* If output xbundle is dot1q-tunnel, set mask bits of cvlan */
2372 static void
2373 check_and_set_cvlan_mask(struct flow_wildcards *wc,
2374 const struct xbundle *xbundle)
2375 {
2376 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2377 wc->masks.vlans[1].tci = htons(0xffff);
2378 }
2379 }
2380
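/* Outputs the packet being translated in 'ctx' to 'out_xbundle', rewriting
 * its VLAN tags to the form that 'out_xbundle' should transmit ('xvlan' as
 * translated by xvlan_output_translate()).  For a bonded bundle this either
 * picks an output member directly or, when the datapath supports
 * recirculation, emits a hash-and-recirculate so that the datapath picks the
 * member. */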
2381 static void
2382 output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
2383 const struct xvlan *xvlan)
2384 {
2385 uint16_t vid;
2386 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
2387 struct xport *xport;
2388 struct xlate_bond_recirc xr;
2389 bool use_recirc = false;
2390 struct xvlan out_xvlan;
2391
2392 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2393
2394 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2395 if (out_xbundle->use_priority_tags) {
2396 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2397 VLAN_PCP_MASK;
2398 }
2399 vid = out_xvlan.v[0].vid;
2400 if (ovs_list_is_empty(&out_xbundle->xports)) {
2401 /* Partially configured bundle with no slaves. Drop the packet. */
2402 return;
2403 } else if (!out_xbundle->bond) {
2404 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
2405 bundle_node);
2406 } else {
2407 struct flow_wildcards *wc = ctx->wc;
2408 struct ofport_dpif *ofport;
2409
2410 if (ctx->xbridge->support.odp.recirc) {
2411 /* In case recirculation is not actually in use, 'xr.recirc_id'
2412 * will be set to '0', since a valid 'recirc_id' can
2413 * not be zero. */
2414 bond_update_post_recirc_rules(out_xbundle->bond,
2415 &xr.recirc_id,
2416 &xr.hash_basis);
2417 if (xr.recirc_id) {
2418 /* Use recirculation instead of output. */
2419 use_recirc = true;
2420 xr.hash_alg = OVS_HASH_ALG_L4;
2421 /* Recirculation does not require unmasking hash fields. */
2422 wc = NULL;
2423 }
2424 }
2425
2426 ofport = bond_choose_output_slave(out_xbundle->bond,
2427 &ctx->xin->flow, wc, vid);
2428 xport = xport_lookup(ctx->xcfg, ofport);
2429
2430 if (!xport) {
2431 /* No slaves enabled, so drop packet. */
2432 return;
2433 }
2434
2435 /* If use_recirc is set, the main thread will handle stats
2436 * accounting for this bond. */
2437 if (!use_recirc) {
2438 if (ctx->xin->resubmit_stats) {
2439 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2440 ctx->xin->resubmit_stats->n_bytes);
2441 }
2442 if (ctx->xin->xcache) {
2443 struct xc_entry *entry;
2444 struct flow *flow;
2445
2446 flow = &ctx->xin->flow;
2447 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
2448 entry->bond.bond = bond_ref(out_xbundle->bond);
2449 entry->bond.flow = xmemdup(flow, sizeof *flow);
2450 entry->bond.vid = vid;
2451 }
2452 }
2453 }
2454
2455 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
2456 xvlan_put(&ctx->xin->flow, &out_xvlan, out_xbundle->use_priority_tags);
2457
2458 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
2459 false, false);
2460 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
2461 }
2462
2463 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2464 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2465 * indicate this; newer upstream kernels use gratuitous ARP requests. */
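/* Concretely, the check below treats any broadcast ARP reply as gratuitous,
 * and a broadcast ARP request as gratuitous when its sender and target IP
 * addresses are equal (nw_src == nw_dst). */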
2466 static bool
2467 is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2468 {
2469 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2470 return false;
2471 }
2472
2473 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2474 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2475 return false;
2476 }
2477
2478 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2479 if (flow->nw_proto == ARP_OP_REPLY) {
2480 return true;
2481 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2482 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2483 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2484
2485 return flow->nw_src == flow->nw_dst;
2486 } else {
2487 return false;
2488 }
2489 }
2490
2491 /* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2492 * dropped. Returns true if they may be forwarded, false if they should be
2493 * dropped.
2494 *
2495 * 'in_port' must be the xport that corresponds to flow->in_port.
2496 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2497 *
2498 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2499 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
2500 * checked by input_vid_is_valid().
2501  * checked by input_vid_is_valid(). */
2506 is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2507 uint16_t vlan)
2508 {
2509 struct xbundle *in_xbundle = in_port->xbundle;
2510 const struct xbridge *xbridge = ctx->xbridge;
2511 struct flow *flow = &ctx->xin->flow;
2512
2513 /* Drop frames for reserved multicast addresses
2514 * only if forward_bpdu option is absent. */
2515 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2516 xlate_report(ctx, OFT_DETAIL,
2517 "packet has reserved destination MAC, dropping");
2518 return false;
2519 }
2520
2521 if (in_xbundle->bond) {
2522 struct mac_entry *mac;
2523
2524 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2525 flow->dl_dst)) {
2526 case BV_ACCEPT:
2527 break;
2528
2529 case BV_DROP:
2530 xlate_report(ctx, OFT_DETAIL,
2531 "bonding refused admissibility, dropping");
2532 return false;
2533
2534 case BV_DROP_IF_MOVED:
2535 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2536 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
2537 if (mac
2538 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
2539 && (!is_gratuitous_arp(flow, ctx->wc)
2540 || mac_entry_is_grat_arp_locked(mac))) {
2541 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2542 xlate_report(ctx, OFT_DETAIL,
2543 "SLB bond thinks this packet looped back, "
2544 "dropping");
2545 return false;
2546 }
2547 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2548 break;
2549 }
2550 }
2551
2552 return true;
2553 }
2554
2555 static bool
2556 update_learning_table__(const struct xbridge *xbridge,
2557 struct xbundle *in_xbundle, struct eth_addr dl_src,
2558 int vlan, bool is_grat_arp)
2559 {
2560 return (in_xbundle == &ofpp_none_bundle
2561 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2562 is_grat_arp,
2563 in_xbundle->bond != NULL,
2564 in_xbundle->ofbundle));
2565 }
2566
2567 static void
2568 update_learning_table(const struct xlate_ctx *ctx,
2569 struct xbundle *in_xbundle, struct eth_addr dl_src,
2570 int vlan, bool is_grat_arp)
2571 {
2572 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2573 is_grat_arp)) {
2574 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2575 "on port %s in VLAN %d",
2576 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
2577 }
2578 }
2579
2580 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2581 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2582 static void
2583 update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
2584 const struct flow *flow,
2585 struct mcast_snooping *ms, int vlan,
2586 struct xbundle *in_xbundle,
2587 const struct dp_packet *packet)
2588 OVS_REQ_WRLOCK(ms->rwlock)
2589 {
2590 const struct igmp_header *igmp;
2591 int count;
2592 size_t offset;
2593 ovs_be32 ip4 = flow->igmp_group_ip4;
2594
2595 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2596 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2597 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2598 xlate_report_debug(ctx, OFT_DETAIL,
2599 "multicast snooping received bad IGMP "
2600 "checksum on port %s in VLAN %d",
2601 in_xbundle->name, vlan);
2602 return;
2603 }
2604
2605 switch (ntohs(flow->tp_src)) {
2606 case IGMP_HOST_MEMBERSHIP_REPORT:
2607 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2608 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2609 xlate_report_debug(ctx, OFT_DETAIL,
2610 "multicast snooping learned that "
2611 IP_FMT" is on port %s in VLAN %d",
2612 IP_ARGS(ip4), in_xbundle->name, vlan);
2613 }
2614 break;
2615 case IGMP_HOST_LEAVE_MESSAGE:
2616 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2617 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
2618 IP_FMT" is on port %s in VLAN %d",
2619 IP_ARGS(ip4), in_xbundle->name, vlan);
2620 }
2621 break;
2622 case IGMP_HOST_MEMBERSHIP_QUERY:
2623 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2624 in_xbundle->ofbundle)) {
2625 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2626 "from "IP_FMT" is on port %s in VLAN %d",
2627 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
2628 }
2629 break;
2630 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2631 count = mcast_snooping_add_report(ms, packet, vlan,
2632 in_xbundle->ofbundle);
2633 if (count) {
2634 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2635 "%d addresses on port %s in VLAN %d",
2636 count, in_xbundle->name, vlan);
2637 }
2638 break;
2639 }
2640 }
2641
2642 static void
2643 update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
2644 const struct flow *flow,
2645 struct mcast_snooping *ms, int vlan,
2646 struct xbundle *in_xbundle,
2647 const struct dp_packet *packet)
2648 OVS_REQ_WRLOCK(ms->rwlock)
2649 {
2650 const struct mld_header *mld;
2651 int count;
2652 size_t offset;
2653
2654 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2655 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2656
2657 if (!mld ||
2658 packet_csum_upperlayer6(dp_packet_l3(packet),
2659 mld, IPPROTO_ICMPV6,
2660 dp_packet_l4_size(packet)) != 0) {
2661 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2662 "bad MLD checksum on port %s in VLAN %d",
2663 in_xbundle->name, vlan);
2664 return;
2665 }
2666
2667 switch (ntohs(flow->tp_src)) {
2668 case MLD_QUERY:
2669 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2670 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2671 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2672 "port %s in VLAN %d", in_xbundle->name, vlan);
2673 }
2674 break;
2675 case MLD_REPORT:
2676 case MLD_DONE:
2677 case MLD2_REPORT:
2678 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2679 if (count) {
2680 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2681 "%d addresses on port %s in VLAN %d",
2682 count, in_xbundle->name, vlan);
2683 }
2684 break;
2685 }
2686 }
2687
2688 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2689 * was received on 'in_xbundle' in 'vlan'. */
2690 static void
2691 update_mcast_snooping_table(const struct xlate_ctx *ctx,
2692 const struct flow *flow, int vlan,
2693 struct xbundle *in_xbundle,
2694 const struct dp_packet *packet)
2695 {
2696 struct mcast_snooping *ms = ctx->xbridge->ms;
2697 struct xbundle *mcast_xbundle;
2698 struct mcast_port_bundle *fport;
2699
2700 /* Don't learn the OFPP_NONE port. */
2701 if (in_xbundle == &ofpp_none_bundle) {
2702 return;
2703 }
2704
2705 /* Don't learn from flood ports */
2706 mcast_xbundle = NULL;
2707 ovs_rwlock_wrlock(&ms->rwlock);
2708 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2709 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2710 if (mcast_xbundle == in_xbundle) {
2711 break;
2712 }
2713 }
2714
2715 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
2716 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2717 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
2718 in_xbundle, packet);
2719 } else {
2720 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
2721 in_xbundle, packet);
2722 }
2723 }
2724 ovs_rwlock_unlock(&ms->rwlock);
2725 }
2726 \f
2727 /* A list of multicast output ports.
2728 *
2729 * We accumulate output ports and then do all the outputs afterward. It would
2730 * be more natural to do the outputs one at a time as we discover the need for
2731 * each one, but this can cause a deadlock because we need to take the
2732 * mcast_snooping's rwlock for reading to iterate through the port lists and
2733 * doing an output, if it goes to a patch port, can eventually come back to the
2734 * same mcast_snooping and attempt to take the write lock (see
2735 * https://github.com/openvswitch/ovs-issues/issues/153). */
2736 struct mcast_output {
2737 /* Discrete ports. */
2738 struct xbundle **xbundles;
2739 size_t n, allocated;
2740
2741 /* If set, flood to all ports. */
2742 bool flood;
2743 };
2744 #define MCAST_OUTPUT_INIT { NULL, 0, 0, false }
2745
2746 /* Adds 'mcast_xbundle' to 'out'. */
2747 static void
2748 mcast_output_add(struct mcast_output *out, struct xbundle *mcast_xbundle)
2749 {
2750 if (out->n >= out->allocated) {
2751 out->xbundles = x2nrealloc(out->xbundles, &out->allocated,
2752 sizeof *out->xbundles);
2753 }
2754 out->xbundles[out->n++] = mcast_xbundle;
2755 }
2756
2757 /* Outputs the packet in 'ctx' to all of the output ports in 'out', given input
2758 * bundle 'in_xbundle' and the current 'xvlan'. */
2759 static void
2760 mcast_output_finish(struct xlate_ctx *ctx, struct mcast_output *out,
2761 struct xbundle *in_xbundle, struct xvlan *xvlan)
2762 {
2763 if (out->flood) {
2764 xlate_normal_flood(ctx, in_xbundle, xvlan);
2765 } else {
2766 for (size_t i = 0; i < out->n; i++) {
2767 output_normal(ctx, out->xbundles[i], xvlan);
2768 }
2769 }
2770
2771 free(out->xbundles);
2772 }
2773
2774 /* send the packet to ports on which the multicast group has been learned */
2775 static void
2776 xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2777 struct mcast_snooping *ms OVS_UNUSED,
2778 struct mcast_group *grp,
2779 struct xbundle *in_xbundle,
2780 struct mcast_output *out)
2781 OVS_REQ_RDLOCK(ms->rwlock)
2782 {
2783 struct mcast_group_bundle *b;
2784 struct xbundle *mcast_xbundle;
2785
2786 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2787 mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
2788 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2789 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
2790 mcast_output_add(out, mcast_xbundle);
2791 } else if (!mcast_xbundle) {
2792 xlate_report(ctx, OFT_WARN,
2793 "mcast group port is unknown, dropping");
2794 } else {
2795 xlate_report(ctx, OFT_DETAIL,
2796 "mcast group port is input port, dropping");
2797 }
2798 }
2799 }
2800
2801 /* send the packet to ports connected to multicast routers */
2802 static void
2803 xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2804 struct mcast_snooping *ms,
2805 struct xbundle *in_xbundle,
2806 const struct xvlan *xvlan,
2807 struct mcast_output *out)
2808 OVS_REQ_RDLOCK(ms->rwlock)
2809 {
2810 struct mcast_mrouter_bundle *mrouter;
2811 struct xbundle *mcast_xbundle;
2812
2813 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2814 mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
2815 if (mcast_xbundle && mcast_xbundle != in_xbundle
2816 && mrouter->vlan == xvlan->v[0].vid) {
2817 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
2818 mcast_output_add(out, mcast_xbundle);
2819 } else if (!mcast_xbundle) {
2820 xlate_report(ctx, OFT_WARN,
2821 "mcast router port is unknown, dropping");
2822 } else if (mrouter->vlan != xvlan->v[0].vid) {
2823 xlate_report(ctx, OFT_DETAIL,
2824 "mcast router is on another vlan, dropping");
2825 } else {
2826 xlate_report(ctx, OFT_DETAIL,
2827 "mcast router port is input port, dropping");
2828 }
2829 }
2830 }
2831
2832 /* send the packet to ports flagged to be flooded */
2833 static void
2834 xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2835 struct mcast_snooping *ms,
2836 struct xbundle *in_xbundle,
2837 struct mcast_output *out)
2838 OVS_REQ_RDLOCK(ms->rwlock)
2839 {
2840 struct mcast_port_bundle *fport;
2841 struct xbundle *mcast_xbundle;
2842
2843 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2844 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2845 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2846 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
2847 mcast_output_add(out, mcast_xbundle);
2848 } else if (!mcast_xbundle) {
2849 xlate_report(ctx, OFT_WARN,
2850 "mcast flood port is unknown, dropping");
2851 } else {
2852 xlate_report(ctx, OFT_DETAIL,
2853 "mcast flood port is input port, dropping");
2854 }
2855 }
2856 }
2857
2858 /* forward the Reports to configured ports */
2859 static void
2860 xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2861 struct mcast_snooping *ms,
2862 struct xbundle *in_xbundle,
2863 struct mcast_output *out)
2864 OVS_REQ_RDLOCK(ms->rwlock)
2865 {
2866 struct mcast_port_bundle *rport;
2867 struct xbundle *mcast_xbundle;
2868
2869 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2870 mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
2871 if (mcast_xbundle
2872 && mcast_xbundle != in_xbundle
2873 && mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
2874 xlate_report(ctx, OFT_DETAIL,
2875 "forwarding report to mcast flagged port");
2876 mcast_output_add(out, mcast_xbundle);
2877 } else if (!mcast_xbundle) {
2878 xlate_report(ctx, OFT_WARN,
2879 "mcast port is unknown, dropping the report");
2880 } else {
2881 xlate_report(ctx, OFT_DETAIL,
2882 "mcast port is input port, dropping the Report");
2883 }
2884 }
2885 }
2886
2887 static void
2888 xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2889 struct xvlan *xvlan)
2890 {
2891 struct xbundle *xbundle;
2892
2893 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2894 if (xbundle != in_xbundle
2895 && xbundle->ofbundle != in_xbundle->ofbundle
2896 && xbundle_includes_vlan(xbundle, xvlan)
2897 && xbundle->floodable
2898 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2899 output_normal(ctx, xbundle, xvlan);
2900 }
2901 }
2902 ctx->nf_output_iface = NF_OUT_FLOOD;
2903 }
2904
2905 static bool
2906 is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2907 {
2908 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2909 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2910 return ip_is_local_multicast(flow->nw_dst);
2911 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2912 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2913 return ipv6_is_all_hosts(&flow->ipv6_dst);
2914 } else {
2915 return false;
2916 }
2917 }
2918
2919 static void
2920 xlate_normal(struct xlate_ctx *ctx)
2921 {
2922 struct flow_wildcards *wc = ctx->wc;
2923 struct flow *flow = &ctx->xin->flow;
2924 struct xbundle *in_xbundle;
2925 struct xport *in_port;
2926 struct mac_entry *mac;
2927 void *mac_port;
2928 struct xvlan in_xvlan;
2929 struct xvlan xvlan;
2930 uint16_t vlan;
2931
2932 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2933 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2934 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
2935
2936 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
2937 if (!in_xbundle) {
2938 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
2939 return;
2940 }
2941
2942 /* Drop malformed frames. */
2943 if (eth_type_vlan(flow->dl_type) &&
2944 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
2945 if (ctx->xin->packet != NULL) {
2946 xlate_report_error(ctx, "dropping packet with partial "
2947 "VLAN tag received on port %s",
2948 in_xbundle->name);
2949 }
2950 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
2951 return;
2952 }
2953
2954 /* Drop frames on bundles reserved for mirroring. */
2955 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
2956 if (ctx->xin->packet != NULL) {
2957 xlate_report_error(ctx, "dropping packet received on port %s, "
2958 "which is reserved exclusively for mirroring",
2959 in_xbundle->name);
2960 }
2961 xlate_report(ctx, OFT_WARN,
2962 "input port is mirror output port, dropping");
2963 return;
2964 }
2965
2966 /* Check VLAN. */
2967 xvlan_extract(flow, &in_xvlan);
2968 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
2969 xlate_report(ctx, OFT_WARN,
2970 "disallowed VLAN VID for this input port, dropping");
2971 return;
2972 }
2973 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
2974 vlan = xvlan.v[0].vid;
2975
2976 /* Check other admissibility requirements. */
2977 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2978 return;
2979 }
2980
2981 /* Learn source MAC. */
2982 bool is_grat_arp = is_gratuitous_arp(flow, wc);
2983 if (ctx->xin->allow_side_effects
2984 && flow->packet_type == htonl(PT_ETH)
2985 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
2986 ) {
2987 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
2988 is_grat_arp);
2989 }
2990 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
2991 struct xc_entry *entry;
2992
2993 /* Save just enough info to update mac learning table later. */
2994 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
2995 entry->normal.ofproto = ctx->xbridge->ofproto;
2996 entry->normal.in_port = flow->in_port.ofp_port;
2997 entry->normal.dl_src = flow->dl_src;
2998 entry->normal.vlan = vlan;
2999 entry->normal.is_gratuitous_arp = is_grat_arp;
3000 }
3001
3002 /* Determine output bundle. */
3003 if (mcast_snooping_enabled(ctx->xbridge->ms)
3004 && !eth_addr_is_broadcast(flow->dl_dst)
3005 && eth_addr_is_multicast(flow->dl_dst)
3006 && is_ip_any(flow)) {
3007 struct mcast_snooping *ms = ctx->xbridge->ms;
3008 struct mcast_group *grp = NULL;
3009
3010 if (is_igmp(flow, wc)) {
3011 /*
3012              * IGMP packets need to take the slow path so that they are
3013              * processed for mdb updates.  Otherwise group entries could
3014              * expire even though hosts keep sending reports.
3015 */
3016 ctx->xout->slow |= SLOW_ACTION;
3017
3018 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
3019 if (mcast_snooping_is_membership(flow->tp_src) ||
3020 mcast_snooping_is_query(flow->tp_src)) {
3021 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
3022 update_mcast_snooping_table(ctx, flow, vlan,
3023 in_xbundle, ctx->xin->packet);
3024 }
3025 }
3026
3027 if (mcast_snooping_is_membership(flow->tp_src)) {
3028 struct mcast_output out = MCAST_OUTPUT_INIT;
3029
3030 ovs_rwlock_rdlock(&ms->rwlock);
3031 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3032 &out);
3033 /* RFC4541: section 2.1.1, item 1: A snooping switch should
3034 * forward IGMP Membership Reports only to those ports where
3035 * multicast routers are attached. Alternatively stated: a
3036 * snooping switch should not forward IGMP Membership Reports
3037 * to ports on which only hosts are attached.
3038 * An administrative control may be provided to override this
3039 * restriction, allowing the report messages to be flooded to
3040 * other ports. */
3041 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
3042 ovs_rwlock_unlock(&ms->rwlock);
3043
3044 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3045 } else {
3046 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
3047 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3048 }
3049 return;
3050 } else if (is_mld(flow, wc)) {
3051 ctx->xout->slow |= SLOW_ACTION;
3052 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
3053 update_mcast_snooping_table(ctx, flow, vlan,
3054 in_xbundle, ctx->xin->packet);
3055 }
3056 if (is_mld_report(flow, wc)) {
3057 struct mcast_output out = MCAST_OUTPUT_INIT;
3058
3059 ovs_rwlock_rdlock(&ms->rwlock);
3060 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3061 &out);
3062 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
3063 ovs_rwlock_unlock(&ms->rwlock);
3064
3065 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3066 } else {
3067 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
3068 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3069 }
3070 } else {
3071 if (is_ip_local_multicast(flow, wc)) {
3072 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
3073 * address in the 224.0.0.x range which are not IGMP must
3074 * be forwarded on all ports */
3075 xlate_report(ctx, OFT_DETAIL,
3076 "RFC4541: section 2.1.2, item 2, flooding");
3077 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3078 return;
3079 }
3080 }
3081
3082 /* forwarding to group base ports */
3083 struct mcast_output out = MCAST_OUTPUT_INIT;
3084
3085 ovs_rwlock_rdlock(&ms->rwlock);
3086 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3087 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
3088 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3089 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
3090 }
3091 if (grp) {
3092 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &out);
3093 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
3094 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3095 &out);
3096 } else {
3097 if (mcast_snooping_flood_unreg(ms)) {
3098 xlate_report(ctx, OFT_DETAIL,
3099 "unregistered multicast, flooding");
3100 out.flood = true;
3101 } else {
3102 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3103 &out);
3104 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
3105 }
3106 }
3107 ovs_rwlock_unlock(&ms->rwlock);
3108
3109 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3110 } else {
3111 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
3112 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
3113 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
3114 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
3115
3116 if (mac_port) {
3117 struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
3118 if (mac_xbundle
3119 && mac_xbundle != in_xbundle
3120 && mac_xbundle->ofbundle != in_xbundle->ofbundle) {
3121 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
3122 output_normal(ctx, mac_xbundle, &xvlan);
3123 } else if (!mac_xbundle) {
3124 xlate_report(ctx, OFT_WARN,
3125 "learned port is unknown, dropping");
3126 } else {
3127 xlate_report(ctx, OFT_DETAIL,
3128 "learned port is input port, dropping");
3129 }
3130 } else {
3131 xlate_report(ctx, OFT_DETAIL,
3132 "no learned MAC for destination, flooding");
3133 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3134 }
3135 }
3136 }
3137
3138 /* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
3139 * 'probability' is the number of packets out of UINT32_MAX to sample. The
3140 * 'cookie' is passed back in the callback for each sampled packet.
3141 * 'tunnel_out_port', if not ODPP_NONE, is added as the
3142 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions',
3143  * an OVS_USERSPACE_ATTR_ACTIONS attribute is added.  Passing a valid
3144  * 'tunnel_out_port' is also how the caller arranges for egress tunnel
3145  * information to be sampled.
3147 */
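/* As a rough sketch (the exact attributes depend on the cookie type), with a
 * 50% sampling rate and a slow-path meter configured the generated datapath
 * actions look like:
 *
 *     sample(sample=50.0%,actions(meter(1),userspace(pid=...,...)))
 *
 * whereas with a 100% rate and no meter only the userspace() action is
 * emitted. */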
3148 static size_t
3149 compose_sample_action(struct xlate_ctx *ctx,
3150 const uint32_t probability,
3151 const struct user_action_cookie *cookie,
3152 const odp_port_t tunnel_out_port,
3153 bool include_actions)
3154 {
3155 if (probability == 0) {
3156 /* No need to generate sampling or the inner action. */
3157 return 0;
3158 }
3159
3160 /* If the slow path meter is configured by the controller,
3161 * insert a meter action before the user space action. */
3162 struct ofproto *ofproto = &ctx->xin->ofproto->up;
3163 uint32_t meter_id = ofproto->slowpath_meter_id;
3164
3165     /* When a meter action is not required, avoid generating a sample
3166      * action for a 100% sampling rate. */
3167 bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
3168 size_t sample_offset = 0, actions_offset = 0;
3169 if (is_sample) {
3170 sample_offset = nl_msg_start_nested(ctx->odp_actions,
3171 OVS_ACTION_ATTR_SAMPLE);
3172 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
3173 probability);
3174 actions_offset = nl_msg_start_nested(ctx->odp_actions,
3175 OVS_SAMPLE_ATTR_ACTIONS);
3176 }
3177
3178 if (meter_id != UINT32_MAX) {
3179 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
3180 }
3181
3182 odp_port_t odp_port = ofp_port_to_odp_port(
3183 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
3184 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
3185 size_t cookie_offset = odp_put_userspace_action(pid, cookie,
3186 sizeof *cookie,
3187 tunnel_out_port,
3188 include_actions,
3189 ctx->odp_actions);
3190
3191 if (is_sample) {
3192 nl_msg_end_nested(ctx->odp_actions, actions_offset);
3193 nl_msg_end_nested(ctx->odp_actions, sample_offset);
3194 }
3195
3196 return cookie_offset;
3197 }
3198
3199 /* If sFlow is not enabled, returns 0 without doing anything.
3200 *
3201 * If sFlow is enabled, appends a template "sample" action to the ODP actions
3202 * in 'ctx'. This action is a template because some of the information needed
3203 * to fill it out is not available until flow translation is complete. In this
3204  * case, this function returns an offset, which is always nonzero, to pass
3205 * later to fix_sflow_action() to fill in the rest of the template. */
3206 static size_t
3207 compose_sflow_action(struct xlate_ctx *ctx)
3208 {
3209 struct dpif_sflow *sflow = ctx->xbridge->sflow;
3210 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3211 return 0;
3212 }
3213
3214 struct user_action_cookie cookie = {
3215 .type = USER_ACTION_COOKIE_SFLOW,
3216 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
3217 .ofproto_uuid = ctx->xbridge->ofproto->uuid
3218 };
3219 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
3220 &cookie, ODPP_NONE, true);
3221 }
3222
3223 /* If flow IPFIX is enabled, make sure IPFIX flow sample action
3224 * at egress point of tunnel port is just in front of corresponding
3225 * output action. If bridge IPFIX is enabled, this appends an IPFIX
3226 * sample action to 'ctx->odp_actions'. */
3227 static void
3228 compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
3229 {
3230 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
3231 odp_port_t tunnel_out_port = ODPP_NONE;
3232
3233 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3234 return;
3235 }
3236
3237 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
3238 * number. */
3239 if (output_odp_port == ODPP_NONE &&
3240 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
3241 return;
3242 }
3243
3244 /* For output case, output_odp_port is valid. */
3245 if (output_odp_port != ODPP_NONE) {
3246 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
3247 return;
3248 }
3249 /* If tunnel sampling is enabled, put an additional option attribute:
3250 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
3251 */
3252 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
3253             dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
3254 tunnel_out_port = output_odp_port;
3255 }
3256 }
3257
3258 struct user_action_cookie cookie = {
3259 .type = USER_ACTION_COOKIE_IPFIX,
3260 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
3261 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
3262 .ipfix.output_odp_port = output_odp_port
3263 };
3264 compose_sample_action(ctx,
3265 dpif_ipfix_get_bridge_exporter_probability(ipfix),
3266 &cookie, tunnel_out_port, false);
3267 }
3268
3269 /* Fix "sample" action according to data collected while composing ODP actions,
3270 * as described in compose_sflow_action().
3271 *
3272 * 'user_cookie_offset' must be the offset returned by
3273 * compose_sflow_action(). */
3274 static void
3275 fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
3276 {
3277 const struct flow *base = &ctx->base_flow;
3278 struct user_action_cookie *cookie;
3279
3280 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
3281 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
3282
3283 cookie->sflow.vlan_tci = base->vlans[0].tci;
3284
3285 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
3286 * port information") for the interpretation of cookie->output. */
3287 switch (ctx->sflow_n_outputs) {
3288 case 0:
3289 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3290 cookie->sflow.output = 0x40000000 | 256;
3291 break;
3292
3293 case 1:
3294 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3295 ctx->xbridge->sflow, ctx->sflow_odp_port);
3296 if (cookie->sflow.output) {
3297 break;
3298 }
3299 /* Fall through. */
3300 default:
3301         /* 0x80000000 means "multiple output ports". */
3302 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3303 break;
3304 }
3305 }
3306
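/* Handles the control protocols that the bridge itself terminates (CFM, BFD,
 * LACP, STP/RSTP and LLDP).  If 'flow' belongs to one of them, processes
 * 'ctx->xin->packet' when one is present, marks the translation as requiring
 * the slow path, and returns true.  Otherwise returns false. */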
3307 static bool
3308 process_special(struct xlate_ctx *ctx, const struct xport *xport)
3309 {
3310 const struct flow *flow = &ctx->xin->flow;
3311 struct flow_wildcards *wc = ctx->wc;
3312 const struct xbridge *xbridge = ctx->xbridge;
3313 const struct dp_packet *packet = ctx->xin->packet;
3314 enum slow_path_reason slow;
3315 bool lacp_may_enable;
3316
3317 if (!xport) {
3318 slow = 0;
3319 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
3320 if (packet) {
3321 cfm_process_heartbeat(xport->cfm, packet);
3322 }
3323 slow = SLOW_CFM;
3324 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
3325 if (packet) {
3326 bfd_process_packet(xport->bfd, flow, packet);
3327 /* If POLL received, immediately sends FINAL back. */
3328 if (bfd_should_send_packet(xport->bfd)) {
3329 ofproto_dpif_monitor_port_send_soon(xport->ofport);
3330 }
3331 }
3332 slow = SLOW_BFD;
3333 } else if (xport->xbundle && xport->xbundle->lacp
3334 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3335 if (packet) {
3336 lacp_may_enable = lacp_process_packet(xport->xbundle->lacp,
3337 xport->ofport, packet);
3338 /* Update LACP status in bond-slave to avoid packet-drops until
3339 * LACP state machine is run by the main thread. */
3340 if (xport->xbundle->bond && lacp_may_enable) {
3341 bond_slave_set_may_enable(xport->xbundle->bond, xport->ofport,
3342 lacp_may_enable);
3343 }
3344 }
3345 slow = SLOW_LACP;
3346 } else if ((xbridge->stp || xbridge->rstp) &&
3347 stp_should_process_flow(flow, wc)) {
3348 if (packet) {
3349 xbridge->stp
3350 ? stp_process_packet(xport, packet)
3351 : rstp_process_packet(xport, packet);
3352 }
3353 slow = SLOW_STP;
3354 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
3355 if (packet) {
3356 lldp_process_packet(xport->lldp, packet);
3357 }
3358 slow = SLOW_LLDP;
3359 } else {
3360 slow = 0;
3361 }
3362
3363 if (slow) {
3364 ctx->xout->slow |= slow;
3365 return true;
3366 } else {
3367 return false;
3368 }
3369 }
3370
3371 static int
3372 tnl_route_lookup_flow(const struct xlate_ctx *ctx,
3373 const struct flow *oflow,
3374 struct in6_addr *ip, struct in6_addr *src,
3375 struct xport **out_port)
3376 {
3377 char out_dev[IFNAMSIZ];
3378 struct xbridge *xbridge;
3379 struct in6_addr gw;
3380 struct in6_addr dst;
3381
3382 dst = flow_tnl_dst(&oflow->tunnel);
3383 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
3384 return -ENOENT;
3385 }
3386
3387 if (ipv6_addr_is_set(&gw) &&
3388 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
3389 *ip = gw;
3390 } else {
3391 *ip = dst;
3392 }
3393
3394 HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
3395 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3396 struct xport *port;
3397
3398 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3399 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3400 *out_port = port;
3401 return 0;
3402 }
3403 }
3404 }
3405 }
3406 return -ENOENT;
3407 }
3408
3409 static int
3410 compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3411 struct dp_packet *packet)
3412 {
3413 struct xbridge *xbridge = out_dev->xbridge;
3414 struct ofpact_output output;
3415 struct flow flow;
3416
3417 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
3418 flow_extract(packet, &flow);
3419 flow.in_port.ofp_port = out_dev->ofp_port;
3420 output.port = OFPP_TABLE;
3421 output.max_len = 0;
3422
3423 return ofproto_dpif_execute_actions__(xbridge->ofproto,
3424 ctx->xin->tables_version, &flow,
3425 NULL, &output.ofpact, sizeof output,
3426 ctx->depth, ctx->resubmits, packet);
3427 }
3428
3429 static void
3430 tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3431 const struct eth_addr eth_src,
3432 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3433 {
3434 struct dp_packet packet;
3435
3436 dp_packet_init(&packet, 0);
3437 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
3438 compose_table_xlate(ctx, out_dev, &packet);
3439 dp_packet_uninit(&packet);
3440 }
3441
3442 static void
3443 tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3444 const struct eth_addr eth_src,
3445 ovs_be32 ip_src, ovs_be32 ip_dst)
3446 {
3447 struct dp_packet packet;
3448
3449 dp_packet_init(&packet, 0);
3450 compose_arp(&packet, ARP_OP_REQUEST,
3451 eth_src, eth_addr_zero, true, ip_src, ip_dst);
3452
3453 compose_table_xlate(ctx, out_dev, &packet);
3454 dp_packet_uninit(&packet);
3455 }
3456
3457 static void
3458 propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3459 const struct flow *src_flow,
3460 struct eth_addr dmac, struct eth_addr smac,
3461 struct in6_addr s_ip6, ovs_be32 s_ip,
3462 bool is_tnl_ipv6, uint8_t nw_proto)
3463 {
3464 dst_flow->dl_dst = dmac;
3465 dst_flow->dl_src = smac;
3466
3467 dst_flow->packet_type = htonl(PT_ETH);
3468 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3469 dst_flow->nw_src = src_flow->tunnel.ip_src;
3470 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3471 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3472
3473 dst_flow->nw_frag = 0; /* Tunnel packets are unfragmented. */
3474 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3475 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3476 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3477 dst_flow->tp_src = src_flow->tunnel.tp_src;
3478
3479 if (is_tnl_ipv6) {
3480 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3481 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3482 && !ipv6_mask_is_any(&s_ip6)) {
3483 dst_flow->ipv6_src = s_ip6;
3484 }
3485 } else {
3486 dst_flow->dl_type = htons(ETH_TYPE_IP);
3487 if (dst_flow->nw_src == 0 && s_ip) {
3488 dst_flow->nw_src = s_ip;
3489 }
3490 }
3491 dst_flow->nw_proto = nw_proto;
3492 }
3493
3494 /*
3495 * Populate the 'flow' and 'base_flow' L3 fields for the post-tunnel-push
3496 * translations.
3497 */
3498 static void
3499 propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3500 struct eth_addr smac, struct in6_addr s_ip6,
3501 ovs_be32 s_ip, bool is_tnl_ipv6,
3502 enum ovs_vport_type tnl_type)
3503 {
3504 struct flow *base_flow, *flow;
3505 flow = &ctx->xin->flow;
3506 base_flow = &ctx->base_flow;
3507 uint8_t nw_proto = 0;
3508
3509 switch (tnl_type) {
3510 case OVS_VPORT_TYPE_GRE:
3511 case OVS_VPORT_TYPE_ERSPAN:
3512 case OVS_VPORT_TYPE_IP6ERSPAN:
3513 case OVS_VPORT_TYPE_IP6GRE:
3514 nw_proto = IPPROTO_GRE;
3515 break;
3516 case OVS_VPORT_TYPE_VXLAN:
3517 case OVS_VPORT_TYPE_GENEVE:
3518 nw_proto = IPPROTO_UDP;
3519 break;
3520 case OVS_VPORT_TYPE_LISP:
3521 case OVS_VPORT_TYPE_STT:
3522 case OVS_VPORT_TYPE_UNSPEC:
3523 case OVS_VPORT_TYPE_NETDEV:
3524 case OVS_VPORT_TYPE_INTERNAL:
3525 case __OVS_VPORT_TYPE_MAX:
3526 default:
3527 OVS_NOT_REACHED();
3528 }
3529 /*
3530 * Update 'base_flow' first, followed by 'flow', since 'dst_flow' is
3531 * modified by the function.
3532 */
3533 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3534 is_tnl_ipv6, nw_proto);
3535 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3536 is_tnl_ipv6, nw_proto);
3537 }
3538
3539 static int
3540 native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
3541 const struct flow *flow, odp_port_t tunnel_odp_port,
3542 bool truncate)
3543 {
3544 struct netdev_tnl_build_header_params tnl_params;
3545 struct ovs_action_push_tnl tnl_push_data;
3546 struct xport *out_dev = NULL;
3547 ovs_be32 s_ip = 0, d_ip = 0;
3548 struct in6_addr s_ip6 = in6addr_any;
3549 struct in6_addr d_ip6 = in6addr_any;
3550 struct eth_addr smac;
3551 struct eth_addr dmac;
3552 int err;
3553 char buf_sip6[INET6_ADDRSTRLEN];
3554 char buf_dip6[INET6_ADDRSTRLEN];
3555
3556 /* Store sFlow data. */
3557 uint32_t sflow_n_outputs = ctx->sflow_n_outputs;
3558
3559 /* Structures to back up the Ethernet and IP data of 'base_flow'. */
3560 struct flow old_base_flow;
3561 struct flow old_flow;
3562
3563 /* Back up the flow and base_flow data. */
3564 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3565 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3566
3567 if (flow->tunnel.ip_src) {
3568 in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
3569 }
3570
3571 err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
3572 if (err) {
3573 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
3574 return err;
3575 }
3576
3577 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
3578 ipv6_string_mapped(buf_dip6, &d_ip6),
3579 netdev_get_name(out_dev->netdev));
3580
3581 /* Use the MAC address of the bridge port of the peer. */
3582 err = netdev_get_etheraddr(out_dev->netdev, &smac);
3583 if (err) {
3584 xlate_report(ctx, OFT_WARN,
3585 "tunnel output device lacks Ethernet address");
3586 return err;
3587 }
3588
3589 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3590 if (d_ip) {
3591 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
3592 }
3593
3594 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
3595 if (err) {
3596 xlate_report(ctx, OFT_DETAIL,
3597 "neighbor cache miss for %s on bridge %s, "
3598 "sending %s request",
3599 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3600 if (d_ip) {
3601 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3602 } else {
3603 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3604 }
3605 return err;
3606 }
3607
3608 if (ctx->xin->xcache) {
3609 struct xc_entry *entry;
3610
3611 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
3612 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3613 sizeof entry->tnl_neigh_cache.br_name);
3614 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
3615 }
3616
3617 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
3618 " to "ETH_ADDR_FMT" %s",
3619 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3620 ETH_ADDR_ARGS(dmac), buf_dip6);
3621
3622 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3623 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
3624 if (err) {
3625 return err;
3626 }
3627 tnl_push_data.tnl_port = tunnel_odp_port;
3628 tnl_push_data.out_port = out_dev->odp_port;
3629
3630 /* After the tunnel header has been added, the MAC and IP data of 'flow' and
3631 * 'base_flow' need to be set properly, since there is no recirculation
3632 * anymore when sending the packet to the tunnel. */
3633
3634 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
3635 s_ip, tnl_params.is_ipv6,
3636 tnl_push_data.tnl_type);
3637
3638 size_t clone_ofs = 0;
3639 size_t push_action_size;
3640
3641 clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
3642 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3643 push_action_size = ctx->odp_actions->size;
3644
3645 if (!truncate) {
3646 const struct dpif_flow_stats *backup_resubmit_stats;
3647 struct xlate_cache *backup_xcache;
3648 struct flow_wildcards *backup_wc, wc;
3649 bool backup_side_effects;
3650 const struct dp_packet *backup_packet;
3651
3652 memset(&wc, 0 , sizeof wc);
3653 backup_wc = ctx->wc;
3654 ctx->wc = &wc;
3655 ctx->xin->wc = NULL;
3656 backup_resubmit_stats = ctx->xin->resubmit_stats;
3657 backup_xcache = ctx->xin->xcache;
3658 backup_side_effects = ctx->xin->allow_side_effects;
3659 backup_packet = ctx->xin->packet;
3660
3661 ctx->xin->resubmit_stats = NULL;
3662 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3663 ctx->xin->allow_side_effects = false;
3664 ctx->xin->packet = NULL;
3665
3666 /* Push the cache entry for the tunnel first. */
3667 struct xc_entry *entry;
3668 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3669 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3670 entry->tunnel_hdr.operation = ADD;
3671
3672 patch_port_output(ctx, xport, out_dev);
3673
3674 /* Similar to the stats update in revalidation, the x_cache entries
3675 * populated by the previous translation are used to update the
3676 * stats correctly.
3677 */
3678 if (backup_resubmit_stats) {
3679 struct dpif_flow_stats stats = *backup_resubmit_stats;
3680 xlate_push_stats(ctx->xin->xcache, &stats);
3681 }
3682 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3683
3684 if (ctx->odp_actions->size > push_action_size) {
3685 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
3686 } else {
3687 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3688 }
3689
3690 /* Restore context status. */
3691 ctx->xin->resubmit_stats = backup_resubmit_stats;
3692 xlate_cache_delete(ctx->xin->xcache);
3693 ctx->xin->xcache = backup_xcache;
3694 ctx->xin->allow_side_effects = backup_side_effects;
3695 ctx->xin->packet = backup_packet;
3696 ctx->wc = backup_wc;
3697 } else {
3698 /* In order to maintain accurate stats, use recirc for
3699 * native tunneling. */
3700 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3701 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3702 }
3703
3704 /* Restore the flows after the translation. */
3705 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3706 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
3707
3708 /* Restore sFlow data. */
3709 ctx->sflow_n_outputs = sflow_n_outputs;
3710
3711 return 0;
3712 }
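
/* Descriptive note (not upstream text) on the actions composed above: in the
 * common, non-truncating case the datapath actions take the form
 *
 *     clone(tnl_push(tnl_port=...,header(...),out_port=...),
 *           <actions of the bridge that owns the tunnel port>)
 *
 * because the post-push translation is done inline via patch_port_output()
 * inside the OVS_ACTION_ATTR_CLONE started above; if that nested translation
 * adds no actions after the tnl_push, the whole clone is cancelled.  When
 * output truncation is requested, the inline translation is skipped and a
 * recirc(0) is emitted inside the clone after the tnl_push instead, so that
 * stats remain accurate. */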
3713
3714 static void
3715 xlate_commit_actions(struct xlate_ctx *ctx)
3716 {
3717 bool use_masked = ctx->xbridge->support.masked_set_action;
3718
3719 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3720 ctx->odp_actions, ctx->wc,
3721 use_masked, ctx->pending_encap,
3722 ctx->pending_decap, ctx->encap_data);
3723 ctx->pending_encap = false;
3724 ctx->pending_decap = false;
3725 ofpbuf_delete(ctx->encap_data);
3726 ctx->encap_data = NULL;
3727 }
3728
3729 static void
3730 clear_conntrack(struct xlate_ctx *ctx)
3731 {
3732 ctx->conntracked = false;
3733 flow_clear_conntrack(&ctx->xin->flow);
3734 }
3735
3736 static bool
3737 xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3738 {
3739 const struct xport *xport_in;
3740
3741 if (!xport_out) {
3742 return false;
3743 }
3744
3745 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3746
3747 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3748 xport_in->xbundle->protected && xport_out->xbundle->protected);
3749 }
3750
3751 /* Handles the case in which a packet is sent from one bridge to another bridge.
3752 *
3753 * The bridges are internally connected, either with patch ports or with
3754 * tunnel ports.
3755 *
3756 * The output action to another bridge causes translation to continue within
3757 * the next bridge. This process can be recursive; the next bridge can
3758 * output to yet another bridge.
3759 *
3760 * The translated actions from the second bridge onwards are enclosed within
3761 * the clone action, so that any modification to the packet will not be visible
3762 * to the remaining actions of the originating bridge.
3763 */
3764 static void
3765 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
3766 struct xport *out_dev)
3767 {
3768 struct flow *flow = &ctx->xin->flow;
3769 struct flow old_flow = ctx->xin->flow;
3770 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3771 bool old_conntrack = ctx->conntracked;
3772 bool old_was_mpls = ctx->was_mpls;
3773 ovs_version_t old_version = ctx->xin->tables_version;
3774 struct ofpbuf old_stack = ctx->stack;
3775 uint8_t new_stack[1024];
3776 struct ofpbuf old_action_set = ctx->action_set;
3777 struct ovs_list *old_trace = ctx->xin->trace;
3778 uint64_t actset_stub[1024 / 8];
3779
3780 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3781 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3782 flow->in_port.ofp_port = out_dev->ofp_port;
3783 flow->metadata = htonll(0);
3784 memset(&flow->tunnel, 0, sizeof flow->tunnel);
3785 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
3786 flow->tunnel.metadata.tab =
3787 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3788 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3789 memset(flow->regs, 0, sizeof flow->regs);
3790 flow->actset_output = OFPP_UNSET;
3791 clear_conntrack(ctx);
3792 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3793 out_dev->xbridge->name);
3794 mirror_mask_t old_mirrors = ctx->mirrors;
3795 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3796 if (independent_mirrors) {
3797 ctx->mirrors = 0;
3798 }
3799 ctx->xbridge = out_dev->xbridge;
3800
3801 /* The bridge is now known so obtain its table version. */
3802 ctx->xin->tables_version
3803 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3804
3805 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3806 if (xport_stp_forward_state(out_dev) &&
3807 xport_rstp_forward_state(out_dev)) {
3808 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3809 false, true, clone_xlate_actions);
3810 if (!ctx->freezing) {
3811 xlate_action_set(ctx);
3812 }
3813 if (ctx->freezing) {
3814 finish_freezing(ctx);
3815 }
3816 } else {
3817 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3818 * the learning action look at the packet, then drop it. */
3819 struct flow old_base_flow = ctx->base_flow;
3820 size_t old_size = ctx->odp_actions->size;
3821 mirror_mask_t old_mirrors2 = ctx->mirrors;
3822
3823 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3824 false, true, clone_xlate_actions);
3825 ctx->mirrors = old_mirrors2;
3826 ctx->base_flow = old_base_flow;
3827 ctx->odp_actions->size = old_size;
3828
3829 /* Undo changes that may have been done for freezing. */
3830 ctx_cancel_freeze(ctx);
3831 }
3832 }
3833
3834 ctx->xin->trace = old_trace;
3835 if (independent_mirrors) {
3836 ctx->mirrors = old_mirrors;
3837 }
3838 ctx->xin->flow = old_flow;
3839 ctx->xbridge = in_dev->xbridge;
3840 ofpbuf_uninit(&ctx->action_set);
3841 ctx->action_set = old_action_set;
3842 ofpbuf_uninit(&ctx->stack);
3843 ctx->stack = old_stack;
3844
3845 /* Restore calling bridge's lookup version. */
3846 ctx->xin->tables_version = old_version;
3847
3848 /* Restore the calling bridge's tunneling information. */
3849 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3850
3851 /* The out bridge popping MPLS should have no effect on the original
3852 * bridge. */
3853 ctx->was_mpls = old_was_mpls;
3854
3855 /* The out bridge's conntrack execution should have no effect on the
3856 * original bridge. */
3857 ctx->conntracked = old_conntrack;
3858
3859 /* The fact that the out bridge exits (for any reason) does not mean
3860 * that the original bridge should exit. Specifically, if the out
3861 * bridge freezes translation, the original bridge must continue
3862 * processing with the original, not the frozen packet! */
3863 ctx->exit = false;
3864
3865 /* Out bridge errors do not propagate back. */
3866 ctx->error = XLATE_OK;
3867
3868 if (ctx->xin->resubmit_stats) {
3869 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3870 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3871 if (out_dev->bfd) {
3872 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3873 }
3874 }
3875 if (ctx->xin->xcache) {
3876 struct xc_entry *entry;
3877
3878 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3879 entry->dev.tx = netdev_ref(in_dev->netdev);
3880 entry->dev.rx = netdev_ref(out_dev->netdev);
3881 entry->dev.bfd = bfd_ref(out_dev->bfd);
3882 }
3883 }
3884
3885 static bool
3886 check_output_prerequisites(struct xlate_ctx *ctx,
3887 const struct xport *xport,
3888 struct flow *flow,
3889 bool check_stp)
3890 {
3891 struct flow_wildcards *wc = ctx->wc;
3892
3893 if (!xport) {
3894 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
3895 return false;
3896 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
3897 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
3898 return false;
3899 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
3900 xlate_report(ctx, OFT_WARN,
3901 "Mirror truncate to ODPP_NONE, skipping output");
3902 return false;
3903 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
3904 xlate_report(ctx, OFT_WARN,
3905 "Flow is between protected ports, skipping output.");
3906 return false;
3907 } else if (check_stp) {
3908 if (is_stp(&ctx->base_flow)) {
3909 if (!xport_stp_should_forward_bpdu(xport) &&
3910 !xport_rstp_should_manage_bpdu(xport)) {
3911 if (ctx->xbridge->stp != NULL) {
3912 xlate_report(ctx, OFT_WARN,
3913 "STP not in listening state, "
3914 "skipping bpdu output");
3915 } else if (ctx->xbridge->rstp != NULL) {
3916 xlate_report(ctx, OFT_WARN,
3917 "RSTP not managing BPDU in this state, "
3918 "skipping bpdu output");
3919 }
3920 return false;
3921 }
3922 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3923 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3924 wc))) {
3925 /* Pass; STP should not block link health detection. */
3926 } else if (!xport_stp_forward_state(xport) ||
3927 !xport_rstp_forward_state(xport)) {
3928 if (ctx->xbridge->stp != NULL) {
3929 xlate_report(ctx, OFT_WARN,
3930 "STP not in forwarding state, skipping output");
3931 } else if (ctx->xbridge->rstp != NULL) {
3932 xlate_report(ctx, OFT_WARN,
3933 "RSTP not in forwarding state, skipping output");
3934 }
3935 return false;
3936 }
3937 }
3938
3939 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3940 flow->packet_type != htonl(PT_ETH)) {
3941 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
3942 "through legacy L2 port. Dropping packet.");
3943 return false;
3944 }
3945
3946 return true;
3947 }
3948
3949 /* Verifies whether the destination address of the received Neighbor
3950 * Advertisement message stored in 'flow' is correct. It should be either the
3951 * solicited-node multicast address FF02::1:FFXX:XXXX, where XX:XXXX stands for
3952 * the last 24 bits of 'ipv6_addr', or it should match 'ipv6_addr' itself. */
3953 static bool
3954 is_nd_dst_correct(const struct flow *flow, const struct in6_addr *ipv6_addr)
3955 {
3956 const uint8_t *flow_ipv6_addr = (uint8_t *) &flow->ipv6_dst;
3957 const uint8_t *addr = (uint8_t *) ipv6_addr;
3958
3959 return (IN6_IS_ADDR_MC_LINKLOCAL(&flow->ipv6_dst) &&
3960 flow_ipv6_addr[11] == 0x01 &&
3961 flow_ipv6_addr[12] == 0xff &&
3962 flow_ipv6_addr[13] == addr[13] &&
3963 flow_ipv6_addr[14] == addr[14] &&
3964 flow_ipv6_addr[15] == addr[15]) ||
3965 IN6_ARE_ADDR_EQUAL(&flow->ipv6_dst, ipv6_addr);
3966 }
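
/* Illustrative sketch (not part of the upstream file): constructing the
 * solicited-node multicast address FF02::1:FFXX:XXXX that the check above
 * accepts for a given 'addr'.  The helper name is hypothetical. */
static inline struct in6_addr
solicited_node_mcast_example(const struct in6_addr *addr)
{
    struct in6_addr snm;

    memset(&snm, 0, sizeof snm);
    snm.s6_addr[0] = 0xff;
    snm.s6_addr[1] = 0x02;
    snm.s6_addr[11] = 0x01;
    snm.s6_addr[12] = 0xff;
    /* Copy the last 24 bits of 'addr'. */
    snm.s6_addr[13] = addr->s6_addr[13];
    snm.s6_addr[14] = addr->s6_addr[14];
    snm.s6_addr[15] = addr->s6_addr[15];
    return snm;
}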
3967
3968 /* Function verifies if the ARP reply or Neighbor Advertisement represented by
3969 * 'flow' addresses the 'xbridge' of 'ctx'. Returns true if the ARP TA or
3970 * neighbor discovery destination is in the list of configured IP addresses of
3971 * the bridge. Otherwise, it returns false. */
3972 static bool
3973 is_neighbor_reply_correct(const struct xlate_ctx *ctx, const struct flow *flow)
3974 {
3975 bool ret = false;
3976 int i;
3977 struct xbridge_addr *xbridge_addr = xbridge_addr_ref(ctx->xbridge->addr);
3978
3979 /* Verify if 'nw_dst' of ARP or 'ipv6_dst' of ICMPV6 is in the list. */
3980 for (i = 0; xbridge_addr && i < xbridge_addr->n_addr; i++) {
3981 struct in6_addr *ip_addr = &xbridge_addr->addr[i];
3982 if ((IN6_IS_ADDR_V4MAPPED(ip_addr) &&
3983 flow->dl_type == htons(ETH_TYPE_ARP) &&
3984 in6_addr_get_mapped_ipv4(ip_addr) == flow->nw_dst) ||
3985 (!IN6_IS_ADDR_V4MAPPED(ip_addr) &&
3986 is_nd_dst_correct(flow, ip_addr))) {
3987 /* Found a match. */
3988 ret = true;
3989 break;
3990 }
3991 }
3992
3993 xbridge_addr_unref(xbridge_addr);
3994 return ret;
3995 }
3996
3997 static bool
3998 terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3999 struct flow *flow, struct flow_wildcards *wc,
4000 odp_port_t *tnl_port)
4001 {
4002 *tnl_port = ODPP_NONE;
4003
4004 /* XXX: Write a better filter for the tunnel port. We can use in_port
4005 * in the tunnel-port flow to avoid these checks completely. */
4006 if (ofp_port == OFPP_LOCAL &&
4007 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
4008 *tnl_port = tnl_port_map_lookup(flow, wc);
4009
4010 /* If no tunnel port was found and it's about an ARP or ICMPv6 packet,
4011 * do tunnel neighbor snooping. */
4012 if (*tnl_port == ODPP_NONE &&
4013 (flow->dl_type == htons(ETH_TYPE_ARP) ||
4014 flow->nw_proto == IPPROTO_ICMPV6) &&
4015 is_neighbor_reply_correct(ctx, flow)) {
4016 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
4017 }
4018 }
4019
4020 return *tnl_port != ODPP_NONE;
4021 }
4022
4023 static void
4024 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
4025 const struct xlate_bond_recirc *xr, bool check_stp,
4026 bool is_last_action OVS_UNUSED, bool truncate)
4027 {
4028 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
4029 struct flow_wildcards *wc = ctx->wc;
4030 struct flow *flow = &ctx->xin->flow;
4031 struct flow_tnl flow_tnl;
4032 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
4033 uint8_t flow_nw_tos;
4034 odp_port_t out_port, odp_port, odp_tnl_port;
4035 bool is_native_tunnel = false;
4036 uint8_t dscp;
4037 struct eth_addr flow_dl_dst = flow->dl_dst;
4038 struct eth_addr flow_dl_src = flow->dl_src;
4039 ovs_be32 flow_packet_type = flow->packet_type;
4040 ovs_be16 flow_dl_type = flow->dl_type;
4041
4042 /* If 'struct flow' gets additional metadata, we'll need to zero it out
4043 * before traversing a patch port. */
4044 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
4045 memset(&flow_tnl, 0, sizeof flow_tnl);
4046
4047 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
4048 return;
4049 }
4050
4051 if (flow->packet_type == htonl(PT_ETH)) {
4052 /* Strip Ethernet header for legacy L3 port. */
4053 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
4054 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
4055 ntohs(flow->dl_type));
4056 }
4057 }
4058
4059 if (xport->peer) {
4060 if (truncate) {
4061 xlate_report_error(ctx, "Cannot truncate output to patch port");
4062 }
4063 patch_port_output(ctx, xport, xport->peer);
4064 return;
4065 }
4066
4067 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
4068 flow_nw_tos = flow->nw_tos;
4069
4070 if (count_skb_priorities(xport)) {
4071 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
4072 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
4073 wc->masks.nw_tos |= IP_DSCP_MASK;
4074 flow->nw_tos &= ~IP_DSCP_MASK;
4075 flow->nw_tos |= dscp;
4076 }
4077 }
4078
4079 if (xport->is_tunnel) {
4080 struct in6_addr dst;
4081 /* Save tunnel metadata so that changes made due to
4082 * the logical (tunnel) port are not visible to any further
4083 * matches, while explicit set actions on tunnel metadata are.
4084 */
4085 flow_tnl = flow->tunnel;
4086 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
4087 if (odp_port == ODPP_NONE) {
4088 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
4089 goto out; /* restore flow_nw_tos */
4090 }
4091 dst = flow_tnl_dst(&flow->tunnel);
4092 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
4093 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
4094 goto out; /* restore flow_nw_tos */
4095 }
4096 if (ctx->xin->resubmit_stats) {
4097 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
4098 }
4099 if (ctx->xin->xcache) {
4100 struct xc_entry *entry;
4101
4102 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
4103 entry->dev.tx = netdev_ref(xport->netdev);
4104 }
4105 out_port = odp_port;
4106 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
4107 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
4108 is_native_tunnel = true;
4109 } else {
4110 const char *tnl_type;
4111
4112 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
4113 tnl_type = tnl_port_get_type(xport->ofport);
4114 commit_odp_tunnel_action(flow, &ctx->base_flow,
4115 ctx->odp_actions, tnl_type);
4116 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
4117 }
4118 } else {
4119 odp_port = xport->odp_port;
4120 out_port = odp_port;
4121 }
4122
4123 if (out_port != ODPP_NONE) {
4124 /* Commit accumulated flow updates before output. */
4125 xlate_commit_actions(ctx);
4126
4127 if (xr) {
4128 /* Recirculate the packet. */
4129 struct ovs_action_hash *act_hash;
4130
4131 /* Hash action. */
4132 enum ovs_hash_alg hash_alg = xr->hash_alg;
4133 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4134 /* Algorithm supported by all datapaths. */
4135 hash_alg = OVS_HASH_ALG_L4;
4136 }
4137 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4138 OVS_ACTION_ATTR_HASH,
4139 sizeof *act_hash);
4140 act_hash->hash_alg = hash_alg;
4141 act_hash->hash_basis = xr->hash_basis;
4142
4143 /* Recirc action. */
4144 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
4145 xr->recirc_id);
4146 } else if (is_native_tunnel) {
4147 /* Output to native tunnel port. */
4148 native_tunnel_output(ctx, xport, flow, odp_port, truncate);
4149 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
4150
4151 } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
4152 &odp_tnl_port)) {
4153 /* Intercept packet to be received on native tunnel port. */
4154 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
4155 odp_tnl_port);
4156
4157 } else {
4158 /* Tunnel push-pop action is not compatible with
4159 * IPFIX action. */
4160 compose_ipfix_action(ctx, out_port);
4161
4162 /* Handle truncation of the mirrored packet. */
4163 if (ctx->mirror_snaplen > 0 &&
4164 ctx->mirror_snaplen < UINT16_MAX) {
4165 struct ovs_action_trunc *trunc;
4166
4167 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4168 OVS_ACTION_ATTR_TRUNC,
4169 sizeof *trunc);
4170 trunc->max_len = ctx->mirror_snaplen;
4171 if (!ctx->xbridge->support.trunc) {
4172 ctx->xout->slow |= SLOW_ACTION;
4173 }
4174 }
4175
4176 nl_msg_put_odp_port(ctx->odp_actions,
4177 OVS_ACTION_ATTR_OUTPUT,
4178 out_port);
4179 }
4180
4181 ctx->sflow_odp_port = odp_port;
4182 ctx->sflow_n_outputs++;
4183 ctx->nf_output_iface = ofp_port;
4184 }
4185
4186 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
4187 mirror_packet(ctx, xport->xbundle,
4188 xbundle_mirror_dst(xport->xbundle->xbridge,
4189 xport->xbundle));
4190 }
4191
4192 out:
4193 /* Restore flow */
4194 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
4195 flow->nw_tos = flow_nw_tos;
4196 flow->dl_dst = flow_dl_dst;
4197 flow->dl_src = flow_dl_src;
4198 flow->packet_type = flow_packet_type;
4199 flow->dl_type = flow_dl_type;
4200 }
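
/* Descriptive summary (not upstream text) of the output paths above, in the
 * order they are considered once accumulated flow changes are committed:
 *  - bond recirculation ('xr'): emit a hash action plus recirc(recirc_id);
 *  - native tunnel output: build and push the tunnel header via
 *    native_tunnel_output();
 *  - native tunnel termination on OFPP_LOCAL: emit tunnel_pop(odp_port);
 *  - otherwise: optional IPFIX sampling and mirror truncation, then a plain
 *    output(out_port) action. */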
4201
4202 static void
4203 compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
4204 const struct xlate_bond_recirc *xr,
4205 bool is_last_action, bool truncate)
4206 {
4207 compose_output_action__(ctx, ofp_port, xr, true,
4208 is_last_action, truncate);
4209 }
4210
4211 static void
4212 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
4213 bool deepens, bool is_last_action,
4214 xlate_actions_handler *actions_xlator)
4215 {
4216 struct rule_dpif *old_rule = ctx->rule;
4217 ovs_be64 old_cookie = ctx->rule_cookie;
4218 const struct rule_actions *actions;
4219
4220 if (ctx->xin->resubmit_stats) {
4221 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
4222 }
4223
4224 ctx->resubmits++;
4225
4226 ctx->depth += deepens;
4227 ctx->rule = rule;
4228 ctx->rule_cookie = rule->up.flow_cookie;
4229 actions = rule_get_actions(&rule->up);
4230 actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
4231 is_last_action, false);
4232 ctx->rule_cookie = old_cookie;
4233 ctx->rule = old_rule;
4234 ctx->depth -= deepens;
4235 }
4236
4237 static bool
4238 xlate_resubmit_resource_check(struct xlate_ctx *ctx)
4239 {
4240 if (ctx->depth >= MAX_DEPTH) {
4241 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
4242 ctx->error = XLATE_RECURSION_TOO_DEEP;
4243 } else if (ctx->resubmits >= MAX_RESUBMITS) {
4244 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
4245 ctx->error = XLATE_TOO_MANY_RESUBMITS;
4246 } else if (ctx->odp_actions->size > UINT16_MAX) {
4247 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
4248 /* NOT an error, as we'll be slow-pathing the flow in this case? */
4249 ctx->exit = true; /* XXX: translation still terminated! */
4250 } else if (ctx->stack.size >= 65536) {
4251 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
4252 ctx->error = XLATE_STACK_TOO_DEEP;
4253 } else {
4254 return true;
4255 }
4256
4257 return false;
4258 }
4259
4260 static void
4261 tuple_swap_flow(struct flow *flow, bool ipv4)
4262 {
4263 uint8_t nw_proto = flow->nw_proto;
4264 flow->nw_proto = flow->ct_nw_proto;
4265 flow->ct_nw_proto = nw_proto;
4266
4267 if (ipv4) {
4268 ovs_be32 nw_src = flow->nw_src;
4269 flow->nw_src = flow->ct_nw_src;
4270 flow->ct_nw_src = nw_src;
4271
4272 ovs_be32 nw_dst = flow->nw_dst;
4273 flow->nw_dst = flow->ct_nw_dst;
4274 flow->ct_nw_dst = nw_dst;
4275 } else {
4276 struct in6_addr ipv6_src = flow->ipv6_src;
4277 flow->ipv6_src = flow->ct_ipv6_src;
4278 flow->ct_ipv6_src = ipv6_src;
4279
4280 struct in6_addr ipv6_dst = flow->ipv6_dst;
4281 flow->ipv6_dst = flow->ct_ipv6_dst;
4282 flow->ct_ipv6_dst = ipv6_dst;
4283 }
4284
4285 ovs_be16 tp_src = flow->tp_src;
4286 flow->tp_src = flow->ct_tp_src;
4287 flow->ct_tp_src = tp_src;
4288
4289 ovs_be16 tp_dst = flow->tp_dst;
4290 flow->tp_dst = flow->ct_tp_dst;
4291 flow->ct_tp_dst = tp_dst;
4292 }
4293
4294 static void
4295 tuple_swap(struct flow *flow, struct flow_wildcards *wc)
4296 {
4297 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
4298
4299 tuple_swap_flow(flow, ipv4);
4300 tuple_swap_flow(&wc->masks, ipv4);
4301 }
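
/* Usage note (descriptive only): tuple_swap() supports the "resubmit(...,ct)"
 * form handled by xlate_table_action() below when 'with_ct_orig' is set.  The
 * packet's 5-tuple is temporarily exchanged with the conntrack original
 * direction tuple (ct_nw_src/ct_nw_dst, ct_tp_src/ct_tp_dst, ct_nw_proto) so
 * that the table lookup matches on the original direction, and the fields are
 * swapped back immediately after the lookup. */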
4302
4303 static void
4304 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
4305 bool may_packet_in, bool honor_table_miss,
4306 bool with_ct_orig, bool is_last_action,
4307 xlate_actions_handler *xlator)
4308 {
4309 /* Check if we need to recirculate before matching in a table. */
4310 if (ctx->was_mpls) {
4311 ctx_trigger_freeze(ctx);
4312 return;
4313 }
4314 if (xlate_resubmit_resource_check(ctx)) {
4315 uint8_t old_table_id = ctx->table_id;
4316 struct rule_dpif *rule;
4317
4318 ctx->table_id = table_id;
4319
4320 /* Swap packet fields with CT 5-tuple if requested. */
4321 if (with_ct_orig) {
4322 /* Do not swap if there is no CT tuple, or if key is not IP. */
4323 if (ctx->xin->flow.ct_nw_proto == 0 ||
4324 !is_ip_any(&ctx->xin->flow)) {
4325 xlate_report_error(ctx,
4326 "resubmit(ct) with non-tracked or non-IP packet!");
4327 return;
4328 }
4329 tuple_swap(&ctx->xin->flow, ctx->wc);
4330 }
4331 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
4332 ctx->xin->tables_version,
4333 &ctx->xin->flow, ctx->wc,
4334 ctx->xin->resubmit_stats,
4335 &ctx->table_id, in_port,
4336 may_packet_in, honor_table_miss,
4337 ctx->xin->xcache);
4338 /* Swap back. */
4339 if (with_ct_orig) {
4340 tuple_swap(&ctx->xin->flow, ctx->wc);
4341 }
4342
4343 if (rule) {
4344 /* Fill in the cache entry here instead of xlate_recursively
4345 * to make the reference counting more explicit. We take a
4346 * reference in the lookups above if we are going to cache the
4347 * rule. */
4348 if (ctx->xin->xcache) {
4349 struct xc_entry *entry;
4350
4351 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
4352 entry->rule = rule;
4353 ofproto_rule_ref(&rule->up);
4354 }
4355
4356 struct ovs_list *old_trace = ctx->xin->trace;
4357 xlate_report_table(ctx, rule, table_id);
4358 xlate_recursively(ctx, rule, table_id <= old_table_id,
4359 is_last_action, xlator);
4360 ctx->xin->trace = old_trace;
4361 }
4362
4363 ctx->table_id = old_table_id;
4364 return;
4365 }
4366 }
4367
4368 /* Consumes the group reference, which is only taken if xcache exists. */
4369 static void
4370 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4371 struct ofputil_bucket *bucket)
4372 {
4373 if (ctx->xin->resubmit_stats) {
4374 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4375 }
4376 if (ctx->xin->xcache) {
4377 struct xc_entry *entry;
4378
4379 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
4380 entry->group.group = group;
4381 entry->group.bucket = bucket;
4382 }
4383 }
4384
4385 static void
4386 xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
4387 bool is_last_action)
4388 {
4389 struct ovs_list *old_trace = ctx->xin->trace;
4390 if (OVS_UNLIKELY(ctx->xin->trace)) {
4391 char *s = xasprintf("bucket %"PRIu32, bucket->bucket_id);
4392 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_BUCKET,
4393 s)->subs;
4394 free(s);
4395 }
4396
4397 uint64_t action_list_stub[1024 / 8];
4398 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4399 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4400 bucket->ofpacts_len);
4401 struct flow old_flow = ctx->xin->flow;
4402 bool old_was_mpls = ctx->was_mpls;
4403
4404 ofpacts_execute_action_set(&action_list, &action_set);
4405 ctx->depth++;
4406 do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action,
4407 true);
4408 ctx->depth--;
4409
4410 ofpbuf_uninit(&action_list);
4411
4412 /* Check if need to freeze. */
4413 if (ctx->freezing) {
4414 finish_freezing(ctx);
4415 }
4416
4417 /* Roll back flow to previous state.
4418 * This is equivalent to cloning the packet for each bucket.
4419 *
4420 * As a side effect any subsequently applied actions will
4421 * also effectively be applied to a clone of the packet taken
4422 * just before applying the all or indirect group.
4423 *
4424 * Note that group buckets are action sets, hence they cannot modify the
4425 * main action set. Also any stack actions are ignored when executing an
4426 * action set, so group buckets cannot change the stack either.
4427 * However, we do allow resubmit actions in group buckets, which could
4428 * break the above assumptions. It is up to the controller not to mess
4429 * with the action_set and stack in the tables resubmitted to from
4430 * group buckets. */
4431 ctx->xin->flow = old_flow;
4432
4433 /* The group bucket popping MPLS should have no effect after bucket
4434 * execution. */
4435 ctx->was_mpls = old_was_mpls;
4436
4437 /* The fact that the group bucket exits (for any reason) does not mean that
4438 * the translation after the group action should exit. Specifically, if
4439 * the group bucket freezes translation, the actions after the group action
4440 * must continue processing with the original, not the frozen packet! */
4441 ctx->exit = false;
4442
4443 /* Context error in a bucket should not impact processing of other buckets
4444 * or actions. This is similar to cloning a packet for group buckets.
4445 * There is no need to restore the error back to its old value because we
4446 * actually processed the group action, which can happen only when there
4447 * was no previous context error.
4448 *
4449 * The exception to the above is errors which act as system limits to protect
4450 * translation from running too long or occupying too much space. These errors
4451 * should not be masked. XLATE_RECURSION_TOO_DEEP, XLATE_TOO_MANY_RESUBMITS
4452 * and XLATE_STACK_TOO_DEEP fall in this category. */
4453 if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
4454 ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
4455 /* reset the error and continue processing other buckets */
4456 ctx->error = XLATE_OK;
4457 }
4458
4459 ctx->xin->trace = old_trace;
4460 }
4461
4462 static struct ofputil_bucket *
4463 pick_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
4464 {
4465 return group_first_live_bucket(ctx, group, 0);
4466 }
4467
4468 static struct ofputil_bucket *
4469 pick_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4470 {
4471 flow_mask_hash_fields(&ctx->xin->flow, ctx->wc,
4472 NX_HASH_FIELDS_SYMMETRIC_L4);
4473 return group_best_live_bucket(ctx, group,
4474 flow_hash_symmetric_l4(&ctx->xin->flow, 0));
4475 }
4476
4477 static struct ofputil_bucket *
4478 pick_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4479 {
4480 const struct field_array *fields = &group->up.props.fields;
4481 const uint8_t *mask_values = fields->values;
4482 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
4483
4484 size_t i;
4485 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4486 const struct mf_field *mf = mf_from_id(i);
4487
4488 /* Skip fields for which prerequisites are not met. */
4489 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4490 /* Skip the mask bytes for this field. */
4491 mask_values += mf->n_bytes;
4492 continue;
4493 }
4494
4495 union mf_value value;
4496 union mf_value mask;
4497
4498 mf_get_value(mf, &ctx->xin->flow, &value);
4499 /* Mask the value. */
4500 for (int j = 0; j < mf->n_bytes; j++) {
4501 mask.b[j] = *mask_values++;
4502 value.b[j] &= mask.b[j];
4503 }
4504 basis = hash_bytes(&value, mf->n_bytes, basis);
4505
4506 /* For tunnels, hash in whether the field is present. */
4507 if (mf_is_tun_metadata(mf)) {
4508 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
4509 }
4510
4511 mf_mask_field_masked(mf, &mask, ctx->wc);
4512 }
4513
4514 return group_best_live_bucket(ctx, group, basis);
4515 }
4516
4517 static struct ofputil_bucket *
4518 pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4519 {
4520 uint32_t dp_hash = ctx->xin->flow.dp_hash;
4521
4522 /* dp_hash value 0 is special since it means that the dp_hash has not been
4523 * computed, as all computed dp_hash values are non-zero. Therefore,
4524 * comparing to zero can be used to decide if the dp_hash value is valid
4525 * without masking the dp_hash field. */
4526 if (!dp_hash) {
4527 enum ovs_hash_alg hash_alg = group->hash_alg;
4528 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4529 /* Algorithm supported by all datapaths. */
4530 hash_alg = OVS_HASH_ALG_L4;
4531 }
4532 ctx_trigger_recirculate_with_hash(ctx, hash_alg, group->hash_basis);
4533 return NULL;
4534 } else {
4535 uint32_t hash_mask = group->hash_mask;
4536 ctx->wc->masks.dp_hash |= hash_mask;
4537
4538 /* Starting from the original masked dp_hash value iterate over the
4539 * hash mapping table to find the first live bucket. As the buckets
4540 * are quasi-randomly spread over the hash values, this maintains
4541 * a distribution according to bucket weights even when some buckets
4542 * are non-live. */
4543 for (int i = 0; i <= hash_mask; i++) {
4544 struct ofputil_bucket *b =
4545 group->hash_map[(dp_hash + i) & hash_mask];
4546 if (bucket_is_alive(ctx, b, 0)) {
4547 return b;
4548 }
4549 }
4550
4551 return NULL;
4552 }
4553 }
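
/* Worked example (descriptive only, values are hypothetical): with
 * hash_mask == 0x0f and dp_hash == 0x2a, the loop above probes
 * hash_map[0x0a], hash_map[0x0b], ... following (0x2a + i) & 0x0f, and
 * returns the first bucket that is still alive.  Because buckets are spread
 * quasi-randomly over the 16 slots in proportion to their weights, the
 * selection distribution is preserved even when some buckets are down. */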
4554
4555 static struct ofputil_bucket *
4556 pick_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4557 {
4558 /* Select groups may access flow keys beyond L2 in order to
4559 * select a bucket. Recirculate as appropriate to make this possible.
4560 */
4561 if (ctx->was_mpls) {
4562 ctx_trigger_freeze(ctx);
4563 return NULL;
4564 }
4565
4566 switch (group->selection_method) {
4567 case SEL_METHOD_DEFAULT:
4568 return pick_default_select_group(ctx, group);
4569 break;
4570 case SEL_METHOD_HASH:
4571 return pick_hash_fields_select_group(ctx, group);
4572 break;
4573 case SEL_METHOD_DP_HASH:
4574 return pick_dp_hash_select_group(ctx, group);
4575 break;
4576 default:
4577 /* Parsing of groups ensures this never happens */
4578 OVS_NOT_REACHED();
4579 }
4580
4581 return NULL;
4582 }
4583
4584 static void
4585 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4586 bool is_last_action)
4587 {
4588 if (group->up.type == OFPGT11_ALL || group->up.type == OFPGT11_INDIRECT) {
4589 struct ovs_list *last_bucket = group->up.buckets.prev;
4590 struct ofputil_bucket *bucket;
4591 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
4592 bool is_last_bucket = &bucket->list_node == last_bucket;
4593 xlate_group_bucket(ctx, bucket, is_last_action && is_last_bucket);
4594 }
4595 xlate_group_stats(ctx, group, NULL);
4596 } else {
4597 struct ofputil_bucket *bucket;
4598 if (group->up.type == OFPGT11_SELECT) {
4599 bucket = pick_select_group(ctx, group);
4600 } else if (group->up.type == OFPGT11_FF) {
4601 bucket = pick_ff_group(ctx, group);
4602 } else {
4603 OVS_NOT_REACHED();
4604 }
4605
4606 if (bucket) {
4607 xlate_report(ctx, OFT_DETAIL, "using bucket %"PRIu32,
4608 bucket->bucket_id);
4609 xlate_group_bucket(ctx, bucket, is_last_action);
4610 xlate_group_stats(ctx, group, bucket);
4611 } else {
4612 xlate_report(ctx, OFT_DETAIL, "no live bucket");
4613 if (ctx->xin->xcache) {
4614 ofproto_group_unref(&group->up);
4615 }
4616 }
4617 }
4618 }
4619
4620 static bool
4621 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4622 bool is_last_action)
4623 {
4624 if (xlate_resubmit_resource_check(ctx)) {
4625 struct group_dpif *group;
4626
4627 /* Take ref only if xcache exists. */
4628 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
4629 ctx->xin->tables_version, ctx->xin->xcache);
4630 if (!group) {
4631 /* XXX: Should set ctx->error ? */
4632 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4633 group_id);
4634 return true;
4635 }
4636 xlate_group_action__(ctx, group, is_last_action);
4637 }
4638
4639 return false;
4640 }
4641
4642 static void
4643 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
4644 const struct ofpact_resubmit *resubmit,
4645 bool is_last_action)
4646 {
4647 ofp_port_t in_port;
4648 uint8_t table_id;
4649 bool may_packet_in = false;
4650 bool honor_table_miss = false;
4651
4652 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4653 /* Still allow missed packets to be sent to the controller
4654 * if resubmitting from an internal table. */
4655 may_packet_in = true;
4656 honor_table_miss = true;
4657 }
4658
4659 in_port = resubmit->in_port;
4660 if (in_port == OFPP_IN_PORT) {
4661 in_port = ctx->xin->flow.in_port.ofp_port;
4662 }
4663
4664 table_id = resubmit->table_id;
4665 if (table_id == 255) {
4666 table_id = ctx->table_id;
4667 }
4668
4669 xlate_table_action(ctx, in_port, table_id, may_packet_in,
4670 honor_table_miss, resubmit->with_ct_orig,
4671 is_last_action, do_xlate_actions);
4672 }
4673
4674 static void
4675 flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4676 bool all, bool is_last_action)
4677 {
4678 if (!xport) {
4679 return;
4680 }
4681
4682 if (all) {
4683 compose_output_action__(ctx, xport->ofp_port, NULL, false,
4684 is_last_action, false);
4685 } else {
4686 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4687 false);
4688 }
4689 }
4690
4691 static void
4692 flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4693 {
4694 const struct xport *xport, *last = NULL;
4695
4696 /* Use 'last' to keep track of the last output port. */
4697 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4698 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
4699 continue;
4700 }
4701
4702 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4703 /* The previously saved 'last' cannot be the final port, so
4704 * send a packet out to it now and update 'last'. */
4705 flood_packet_to_port(ctx, last, all, false);
4706 last = xport;
4707 }
4708 }
4709
4710 /* Send the packet to the 'last' port. */
4711 flood_packet_to_port(ctx, last, all, is_last_action);
4712 ctx->nf_output_iface = NF_OUT_FLOOD;
4713 }
4714
4715 static void
4716 put_controller_user_action(struct xlate_ctx *ctx,
4717 bool dont_send, bool continuation,
4718 uint32_t recirc_id, int len,
4719 enum ofp_packet_in_reason reason,
4720 uint16_t controller_id)
4721 {
4722 struct user_action_cookie cookie;
4723
4724 memset(&cookie, 0, sizeof cookie);
4725 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4726 cookie.ofp_in_port = OFPP_NONE,
4727 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4728 cookie.controller.dont_send = dont_send;
4729 cookie.controller.continuation = continuation;
4730 cookie.controller.reason = reason;
4731 cookie.controller.recirc_id = recirc_id;
4732 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4733 cookie.controller.controller_id = controller_id;
4734 cookie.controller.max_len = len;
4735
4736 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4737 ctx->xin->flow.in_port.ofp_port);
4738 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
4739 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4740 false, ctx->odp_actions);
4741 }
4742
4743 static void
4744 xlate_controller_action(struct xlate_ctx *ctx, int len,
4745 enum ofp_packet_in_reason reason,
4746 uint16_t controller_id,
4747 uint32_t provider_meter_id,
4748 const uint8_t *userdata, size_t userdata_len)
4749 {
4750 xlate_commit_actions(ctx);
4751
4752 /* A packet sent by an action in a table-miss rule is considered an
4753 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4754 * it will get translated back to OFPR_ACTION for those versions. */
4755 if (reason == OFPR_ACTION
4756 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
4757 reason = OFPR_EXPLICIT_MISS;
4758 }
4759
4760 struct frozen_state state = {
4761 .table_id = ctx->table_id,
4762 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4763 .stack = ctx->stack.data,
4764 .stack_size = ctx->stack.size,
4765 .mirrors = ctx->mirrors,
4766 .conntracked = ctx->conntracked,
4767 .ofpacts = NULL,
4768 .ofpacts_len = 0,
4769 .action_set = NULL,
4770 .action_set_len = 0,
4771 .userdata = CONST_CAST(uint8_t *, userdata),
4772 .userdata_len = userdata_len,
4773 };
4774 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4775
4776 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4777 if (!recirc_id) {
4778 xlate_report_error(ctx, "Failed to allocate recirculation id");
4779 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4780 return;
4781 }
4782 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4783
4784 /* If the controller action didn't request a meter (indicated by a
4785 * 'meter_id' argument other than NX_CTLR_NO_METER), see if one was
4786 * configured through the "controller" virtual meter.
4787 *
4788 * Internally, ovs-vswitchd uses UINT32_MAX to indicate no meter is
4789 * configured. */
4790 uint32_t meter_id;
4791 if (provider_meter_id == UINT32_MAX) {
4792 meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4793 } else {
4794 meter_id = provider_meter_id;
4795 }
4796
4797 size_t offset;
4798 size_t ac_offset;
4799 if (meter_id != UINT32_MAX) {
4800 /* If a controller meter is configured, generate a clone(meter, userspace)
4801 * action, implemented here as a sample action with 100% probability. */
4802 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4803 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4804 UINT32_MAX);
4805 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4806 OVS_SAMPLE_ATTR_ACTIONS);
4807 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
4808 }
4809
4810 /* Generate the datapath flows even if we don't send the packet-in
4811 * so that debugging more closely represents normal state. */
4812 bool dont_send = false;
4813 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4814 dont_send = true;
4815 }
4816 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4817 reason, controller_id);
4818
4819 if (meter_id != UINT32_MAX) {
4820 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4821 nl_msg_end_nested(ctx->odp_actions, offset);
4822 }
4823 }
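
/* Descriptive note (not upstream text): when a controller meter is in use,
 * the actions composed above nest the userspace action under a 100% sample,
 * roughly:
 *
 *     sample(sample=100%,actions(meter(<meter_id>),
 *            userspace(pid=...,controller(...))))
 *
 * When no meter is configured, only the plain userspace action is emitted. */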
4824
4825 /* Creates a frozen state, and allocates a unique recirc id for the given
4826 * state. Returns a non-zero recirc id if it is allocated successfully.
4827 * Returns 0 otherwise.
4828 */
4829 static uint32_t
4830 finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4831 {
4832 ovs_assert(ctx->freezing);
4833
4834 struct frozen_state state = {
4835 .table_id = table,
4836 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4837 .stack = ctx->stack.data,
4838 .stack_size = ctx->stack.size,
4839 .mirrors = ctx->mirrors,
4840 .conntracked = ctx->conntracked,
4841 .xport_uuid = ctx->xin->xport_uuid,
4842 .ofpacts = ctx->frozen_actions.data,
4843 .ofpacts_len = ctx->frozen_actions.size,
4844 .action_set = ctx->action_set.data,
4845 .action_set_len = ctx->action_set.size,
4846 .userdata = ctx->pause ? CONST_CAST(uint8_t *,ctx->pause->userdata)
4847 : NULL,
4848 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
4849 };
4850 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4851
4852 /* Allocate a unique recirc id for the given metadata state in the
4853 * flow. An existing id, with a new reference to the corresponding
4854 * recirculation context, will be returned if possible.
4855 * The life-cycle of this recirc id is managed by associating it
4856 * with the udpif key ('ukey') created for each new datapath flow. */
4857 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4858 if (!recirc_id) {
4859 xlate_report_error(ctx, "Failed to allocate recirculation id");
4860 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4861 return 0;
4862 }
4863 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4864
4865 if (ctx->pause) {
4866 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4867 return 0;
4868 }
4869
4870 put_controller_user_action(ctx, false, true, recirc_id,
4871 ctx->pause->max_len,
4872 ctx->pause->reason,
4873 ctx->pause->controller_id);
4874 } else {
4875 if (ctx->recirc_update_dp_hash) {
4876 struct ovs_action_hash *act_hash;
4877
4878 /* Hash action. */
4879 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4880 OVS_ACTION_ATTR_HASH,
4881 sizeof *act_hash);
4882 act_hash->hash_alg = ctx->dp_hash_alg;
4883 act_hash->hash_basis = ctx->dp_hash_basis;
4884 }
4885 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
4886 }
4887
4888 /* Undo changes done by freezing. */
4889 ctx_cancel_freeze(ctx);
4890 return recirc_id;
4891 }
4892
4893 /* Called only when we're freezing. */
4894 static void
4895 finish_freezing(struct xlate_ctx *ctx)
4896 {
4897 xlate_commit_actions(ctx);
4898 finish_freezing__(ctx, 0);
4899 }
4900
4901 /* Fork the pipeline here. The current packet will continue processing the
4902 * current action list. A clone of the current packet will recirculate, skip
4903 * the remainder of the current action list and asynchronously resume pipeline
4904 * processing in 'table' with the current metadata and action set. */
4905 static void
4906 compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
4907 const uint16_t zone)
4908 {
4909 uint32_t recirc_id;
4910 ctx->freezing = true;
4911 recirc_id = finish_freezing__(ctx, table);
4912
4913 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4914 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4915 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
4916 ctx->xin->packet, recirc_id, zone)) {
4917 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4918 "recirculate. The forked pipeline will be resumed at "
4919 "table %u.", table);
4920 } else {
4921 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4922 "forked pipeline with recirc_id = %d.", recirc_id);
4923 }
4924 }
4925 }
4926
4927 static void
4928 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
4929 {
4930 struct flow *flow = &ctx->xin->flow;
4931 int n;
4932
4933 ovs_assert(eth_type_mpls(mpls->ethertype));
4934
4935 n = flow_count_mpls_labels(flow, ctx->wc);
4936 if (!n) {
4937 xlate_commit_actions(ctx);
4938 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4939 if (ctx->xin->packet != NULL) {
4940 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4941 "action can't be performed as it would have "
4942 "more MPLS LSEs than the %d supported.",
4943 FLOW_MAX_MPLS_LABELS);
4944 }
4945 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4946 return;
4947 }
4948
4949 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4950 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
4951 }
4952
4953 static void
4954 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
4955 {
4956 struct flow *flow = &ctx->xin->flow;
4957 int n = flow_count_mpls_labels(flow, ctx->wc);
4958
4959 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
4960 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
4961 ctx->was_mpls = true;
4962 }
4963 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4964 if (ctx->xin->packet != NULL) {
4965 xlate_report_error(ctx, "dropping packet on which an "
4966 "MPLS pop action can't be performed as it has "
4967 "more MPLS LSEs than the %d supported.",
4968 FLOW_MAX_MPLS_LABELS);
4969 }
4970 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4971 ofpbuf_clear(ctx->odp_actions);
4972 }
4973 }
4974
4975 static bool
4976 compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4977 {
4978 struct flow *flow = &ctx->xin->flow;
4979
4980 if (!is_ip_any(flow)) {
4981 return false;
4982 }
4983
4984 ctx->wc->masks.nw_ttl = 0xff;
4985 if (flow->nw_ttl > 1) {
4986 flow->nw_ttl--;
4987 return false;
4988 } else {
4989 size_t i;
4990
4991 for (i = 0; i < ids->n_controllers; i++) {
4992 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4993 ids->cnt_ids[i], UINT32_MAX, NULL, 0);
4994 }
4995
4996 /* Stop processing for current table. */
4997 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4998 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
4999 return true;
5000 }
5001 }
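
/* Worked example (descriptive only): with an OpenFlow action such as
 * "dec_ttl(1,5)" in ovs-ofctl syntax, a packet whose IP TTL is 1 is not
 * processed further by the current table; instead the loop above sends a copy
 * to the controllers with IDs 1 and 5 using reason OFPR_INVALID_TTL.  A TTL
 * greater than 1 is simply decremented and processing continues. */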
5002
5003 static void
5004 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
5005 {
5006 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5007 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
5008 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
5009 }
5010 }
5011
5012 static void
5013 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
5014 {
5015 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5016 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
5017 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
5018 }
5019 }
5020
5021 static bool
5022 compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
5023 {
5024 struct flow *flow = &ctx->xin->flow;
5025
5026 if ((flow->packet_type == htonl(PT_NSH)) ||
5027 (flow->dl_type == htons(ETH_TYPE_NSH))) {
5028 ctx->wc->masks.nsh.ttl = 0xff;
5029 if (flow->nsh.ttl > 1) {
5030 flow->nsh.ttl--;
5031 return false;
5032 } else {
5033 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
5034 0, UINT32_MAX, NULL, 0);
5035 }
5036 }
5037
5038 /* Stop processing for current table. */
5039 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
5040 return true;
5041 }
5042
5043 static void
5044 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
5045 {
5046 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5047 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
5048 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
5049 }
5050 }
5051
5052 static bool
5053 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
5054 {
5055 struct flow *flow = &ctx->xin->flow;
5056
5057 if (eth_type_mpls(flow->dl_type)) {
5058 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
5059
5060 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
5061 if (ttl > 1) {
5062 ttl--;
5063 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
5064 return false;
5065 } else {
5066 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
5067 UINT32_MAX, NULL, 0);
5068 }
5069 }
5070
5071 /* Stop processing for current table. */
5072 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
5073 return true;
5074 }
5075
5076 /* Emits an action that outputs to 'port', within 'ctx'.
5077 *
5078 * 'controller_len' affects only packets sent to an OpenFlow controller. It
5079 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
5080 * send the whole packet (and 0 means to omit the packet entirely).
5081 *
5082 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
5083 * controller. If it is false, then the packet is never sent to the OpenFlow
5084 * controller.
5085 *
5086 * 'is_last_action' should be true if this output is the last OpenFlow action
5087 * to be processed, which enables certain optimizations.
5088 *
5089 * 'truncate' should be true if the packet to be output is being truncated,
5090 * which suppresses certain optimizations. */
5091 static void
5092 xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
5093 uint16_t controller_len, bool may_packet_in,
5094 bool is_last_action, bool truncate,
5095 bool group_bucket_action)
5096 {
5097 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
5098
5099 ctx->nf_output_iface = NF_OUT_DROP;
5100
5101 switch (port) {
5102 case OFPP_IN_PORT:
5103 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
5104 is_last_action, truncate);
5105 break;
5106 case OFPP_TABLE:
5107 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
5108 0, may_packet_in, true, false, false,
5109 do_xlate_actions);
5110 break;
5111 case OFPP_NORMAL:
5112 xlate_normal(ctx);
5113 break;
5114 case OFPP_FLOOD:
5115 flood_packets(ctx, false, is_last_action);
5116 break;
5117 case OFPP_ALL:
5118 flood_packets(ctx, true, is_last_action);
5119 break;
5120 case OFPP_CONTROLLER:
5121 xlate_controller_action(ctx, controller_len,
5122 (ctx->in_packet_out ? OFPR_PACKET_OUT
5123 : group_bucket_action ? OFPR_GROUP
5124 : ctx->in_action_set ? OFPR_ACTION_SET
5125 : OFPR_ACTION),
5126 0, UINT32_MAX, NULL, 0);
5127 break;
5128 case OFPP_NONE:
5129 break;
5130 case OFPP_LOCAL:
5131 default:
5132 if (port != ctx->xin->flow.in_port.ofp_port) {
5133 compose_output_action(ctx, port, NULL, is_last_action, truncate);
5134 } else {
5135 xlate_report_info(ctx, "skipping output to input port");
5136 }
5137 break;
5138 }
5139
5140 if (prev_nf_output_iface == NF_OUT_FLOOD) {
5141 ctx->nf_output_iface = NF_OUT_FLOOD;
5142 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
5143 ctx->nf_output_iface = prev_nf_output_iface;
5144 } else if (prev_nf_output_iface != NF_OUT_DROP &&
5145 ctx->nf_output_iface != NF_OUT_FLOOD) {
5146 ctx->nf_output_iface = NF_OUT_MULTI;
5147 }
5148 }
5149
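/* Summary of the function below, inferred from its body: implements the
 * OFPACT_OUTPUT_REG action by extracting the port number from the subfield
 * 'or->src', un-wildcarding that subfield, and outputting to the port if the
 * value fits in 16 bits. */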
5150 static void
5151 xlate_output_reg_action(struct xlate_ctx *ctx,
5152 const struct ofpact_output_reg *or,
5153 bool is_last_action,
5154 bool group_bucket_action)
5155 {
5156 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
5157 if (port <= UINT16_MAX) {
5158 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
5159
5160 union mf_subvalue value;
5161
5162 memset(&value, 0xff, sizeof value);
5163 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
5164 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
5165 false, is_last_action, false,
5166 group_bucket_action);
5167 } else {
5168 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
5169 port);
5170 }
5171 }
5172
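/* Summary of the function below, inferred from its body: implements
 * OFPACT_OUTPUT_TRUNC by emitting an OVS_ACTION_ATTR_TRUNC datapath action
 * that limits the packet to 'max_len' bytes, followed by an output to
 * 'port'.  Reserved ports other than OFPP_LOCAL and OFPP_IN_PORT are
 * rejected, as are patch ports and unknown ports.  Falls back to the slow
 * path if the datapath does not support truncation. */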
5173 static void
5174 xlate_output_trunc_action(struct xlate_ctx *ctx,
5175 ofp_port_t port, uint32_t max_len,
5176 bool is_last_action,
5177 bool group_bucket_action)
5178 {
5179 bool support_trunc = ctx->xbridge->support.trunc;
5180 struct ovs_action_trunc *trunc;
5181 char name[OFP_MAX_PORT_NAME_LEN];
5182
5183 switch (port) {
5184 case OFPP_TABLE:
5185 case OFPP_NORMAL:
5186 case OFPP_FLOOD:
5187 case OFPP_ALL:
5188 case OFPP_CONTROLLER:
5189 case OFPP_NONE:
5190 ofputil_port_to_string(port, NULL, name, sizeof name);
5191 xlate_report(ctx, OFT_WARN,
5192 "output_trunc does not support port: %s", name);
5193 break;
5194 case OFPP_LOCAL:
5195 case OFPP_IN_PORT:
5196 default:
5197 if (port != ctx->xin->flow.in_port.ofp_port) {
5198 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
5199
5200 if (xport == NULL || xport->odp_port == ODPP_NONE) {
5201 /* Since truncation is applied by the output action that
5202 * follows, the behavior is hard to predict if the output
5203 * port is a patch port. For simplicity, disallow this case. */
5204 ofputil_port_to_string(port, NULL, name, sizeof name);
5205 xlate_report_error(ctx, "output_trunc does not support "
5206 "patch port %s", name);
5207 break;
5208 }
5209
5210 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
5211 OVS_ACTION_ATTR_TRUNC,
5212 sizeof *trunc);
5213 trunc->max_len = max_len;
5214 xlate_output_action(ctx, port, 0, false, is_last_action, true,
5215 group_bucket_action);
5216 if (!support_trunc) {
5217 ctx->xout->slow |= SLOW_ACTION;
5218 }
5219 } else {
5220 xlate_report_info(ctx, "skipping output to input port");
5221 }
5222 break;
5223 }
5224 }
5225
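/* Summary of the function below, inferred from its body: implements the
 * OpenFlow "enqueue" action by mapping the queue ID to a datapath priority,
 * temporarily overriding the flow's skb_priority while composing the output
 * action, and falling back to a plain output if the queue cannot be mapped.
 * Also updates the NetFlow output interface. */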
5226 static void
5227 xlate_enqueue_action(struct xlate_ctx *ctx,
5228 const struct ofpact_enqueue *enqueue,
5229 bool is_last_action,
5230 bool group_bucket_action)
5231 {
5232 ofp_port_t ofp_port = enqueue->port;
5233 uint32_t queue_id = enqueue->queue;
5234 uint32_t flow_priority, priority;
5235 int error;
5236
5237 /* Translate queue to priority. */
5238 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
5239 if (error) {
5240 /* Fall back to ordinary output action. */
5241 xlate_output_action(ctx, enqueue->port, 0, false,
5242 is_last_action, false,
5243 group_bucket_action);
5244 return;
5245 }
5246
5247 /* Check output port. */
5248 if (ofp_port == OFPP_IN_PORT) {
5249 ofp_port = ctx->xin->flow.in_port.ofp_port;
5250 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
5251 return;
5252 }
5253
5254 /* Add datapath actions. */
5255 flow_priority = ctx->xin->flow.skb_priority;
5256 ctx->xin->flow.skb_priority = priority;
5257 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
5258 ctx->xin->flow.skb_priority = flow_priority;
5259
5260 /* Update NetFlow output port. */
5261 if (ctx->nf_output_iface == NF_OUT_DROP) {
5262 ctx->nf_output_iface = ofp_port;
5263 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
5264 ctx->nf_output_iface = NF_OUT_MULTI;
5265 }
5266 }
5267
5268 static void
5269 xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
5270 {
5271 uint32_t skb_priority;
5272
5273 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
5274 ctx->xin->flow.skb_priority = skb_priority;
5275 } else {
5276 /* Couldn't translate queue to a priority. Nothing to do. A warning
5277 * has already been logged. */
5278 }
5279 }
5280
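/* Callback for bundle_execute(): reports whether 'ofp_port' is usable as a
 * bundle member on 'xbridge_'.  Reserved ports other than OFPP_CONTROLLER
 * are always considered enabled; other ports are enabled if their
 * 'may_enable' flag is set. */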
5281 static bool
5282 slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
5283 {
5284 const struct xbridge *xbridge = xbridge_;
5285 struct xport *port;
5286
5287 switch (ofp_port) {
5288 case OFPP_IN_PORT:
5289 case OFPP_TABLE:
5290 case OFPP_NORMAL:
5291 case OFPP_FLOOD:
5292 case OFPP_ALL:
5293 case OFPP_NONE:
5294 return true;
5295 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5296 return false;
5297 default:
5298 port = get_ofp_port(xbridge, ofp_port);
5299 return port ? port->may_enable : false;
5300 }
5301 }
5302
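/* Summary of the function below, inferred from its body: implements the
 * "bundle" action by selecting a member port with bundle_execute() and then
 * either storing it into the destination field (bundle_load) or outputting
 * to it directly. */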
5303 static void
5304 xlate_bundle_action(struct xlate_ctx *ctx,
5305 const struct ofpact_bundle *bundle,
5306 bool is_last_action,
5307 bool group_bucket_action)
5308 {
5309 ofp_port_t port;
5310
5311 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
5312 CONST_CAST(struct xbridge *, ctx->xbridge));
5313 if (bundle->dst.field) {
5314 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
5315 xlate_report_subfield(ctx, &bundle->dst);
5316 } else {
5317 xlate_output_action(ctx, port, 0, false, is_last_action, false,
5318 group_bucket_action);
5319 }
5320 }
5321
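/* Summary of the function below, inferred from its body: implements the
 * "learn" action.  Un-wildcards the fields that the learn action depends on
 * and, when side effects are allowed or an xlate cache is in use, builds the
 * flow mod described by 'learn' and installs it (or records it in the xlate
 * cache so that the learned flow can be refreshed later). */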
5322 static void
5323 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5324 {
5325 learn_mask(learn, ctx->wc);
5326
5327 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
5328 uint64_t ofpacts_stub[1024 / 8];
5329 struct ofputil_flow_mod fm;
5330 struct ofproto_flow_mod ofm__, *ofm;
5331 struct ofpbuf ofpacts;
5332 enum ofperr error;
5333
5334 if (ctx->xin->xcache) {
5335 ofm = xmalloc(sizeof *ofm);
5336 } else {
5337 ofm = &ofm__;
5338 }
5339
5340 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
5341 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
5342 if (OVS_UNLIKELY(ctx->xin->trace)) {
5343 struct ds s = DS_EMPTY_INITIALIZER;
5344 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
5345 minimatch_format(&fm.match,
5346 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
5347 NULL, &s, OFP_DEFAULT_PRIORITY);
5348 ds_chomp(&s, ' ');
5349 ds_put_format(&s, " priority=%d", fm.priority);
5350 if (fm.new_cookie) {
5351 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5352 }
5353 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5354 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5355 }
5356 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5357 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5358 }
5359 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5360 ds_put_cstr(&s, " send_flow_rem");
5361 }
5362 ds_put_cstr(&s, " actions=");
5363 struct ofpact_format_params fp = { .s = &s };
5364 ofpacts_format(fm.ofpacts, fm.ofpacts_len, &fp);
5365 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5366 ds_destroy(&s);
5367 }
5368 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5369 &fm, ofm);
5370 ofpbuf_uninit(&ofpacts);
5371
5372 if (!error) {
5373 bool success = true;
5374 if (ctx->xin->allow_side_effects) {
5375 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5376 learn->limit, &success);
5377 } else if (learn->limit) {
5378 if (!ofm->temp_rule
5379 || ofm->temp_rule->state != RULE_INSERTED) {
5380 /* The learned rule expired and there are no packets, so
5381 * we cannot learn again. Since the translated actions
5382 * depend on the result of learning, we tell the caller
5383 * that there's no point in caching this result. */
5384 ctx->xout->avoid_caching = true;
5385 }
5386 }
5387
5388 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5389 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5390 &ctx->xin->flow, ctx->wc);
5391 xlate_report_subfield(ctx, &learn->result_dst);
5392 }
5393
5394 if (success && ctx->xin->xcache) {
5395 struct xc_entry *entry;
5396
5397 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5398 entry->learn.ofm = ofm;
5399 entry->learn.limit = learn->limit;
5400 ofm = NULL;
5401 } else {
5402 ofproto_flow_mod_uninit(ofm);
5403 }
5404
5405 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5406 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5407 }
5408 }
5409
5410 if (ofm != &ofm__) {
5411 free(ofm);
5412 }
5413
5414 if (error) {
5415 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5416 ofperr_to_string(error));
5417 }
5418
5419 minimatch_destroy(&fm.match);
5420 } else {
5421 xlate_report(ctx, OFT_WARN,
5422 "suppressing side effects, so learn action ignored");
5423 }
5424 }
5425
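/* Helper for the "fin_timeout" action: if the packet carries a TCP FIN or
 * RST flag, reduces 'rule''s idle and hard timeouts to the given values. */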
5426 static void
5427 xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5428 uint16_t idle_timeout, uint16_t hard_timeout)
5429 {
5430 if (tcp_flags & (TCP_FIN | TCP_RST)) {
5431 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
5432 }
5433 }
5434
5435 static void
5436 xlate_fin_timeout(struct xlate_ctx *ctx,
5437 const struct ofpact_fin_timeout *oft)
5438 {
5439 if (ctx->rule) {
5440 if (ctx->xin->allow_side_effects) {
5441 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5442 oft->fin_idle_timeout, oft->fin_hard_timeout);
5443 }
5444 if (ctx->xin->xcache) {
5445 struct xc_entry *entry;
5446
5447 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
5448 /* XC_RULE already holds a reference on the rule, none is taken
5449 * here. */
5450 entry->fin.rule = ctx->rule;
5451 entry->fin.idle = oft->fin_idle_timeout;
5452 entry->fin.hard = oft->fin_hard_timeout;
5453 }
5454 }
5455 }
5456
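/* Summary of the function below, inferred from its body: implements the
 * flow "sample" action used for per-flow IPFIX sampling.  Resolves the
 * optional sampling port, emits tunnel metadata when egress tunnel sampling
 * is configured for the collector set, and composes a datapath sample action
 * carrying a USER_ACTION_COOKIE_FLOW_SAMPLE cookie. */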
5457 static void
5458 xlate_sample_action(struct xlate_ctx *ctx,
5459 const struct ofpact_sample *os)
5460 {
5461 odp_port_t output_odp_port = ODPP_NONE;
5462 odp_port_t tunnel_out_port = ODPP_NONE;
5463 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5464 bool emit_set_tunnel = false;
5465
5466 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5467 return;
5468 }
5469
5470 /* Scale the probability from 16-bit to 32-bit while representing
5471 * the same percentage. */
5472 uint32_t probability = (os->probability << 16) | os->probability;
5473
5474 /* If the sampling port in the flow sample action is equal to the input
5475 * port, this sample action is an input port sample action. */
5476 if (os->sampling_port != OFPP_NONE &&
5477 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5478 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5479 os->sampling_port);
5480 if (output_odp_port == ODPP_NONE) {
5481 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5482 "action", os->sampling_port);
5483 return;
5484 }
5485
5486 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5487 os->collector_set_id)
5488 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
5489 tunnel_out_port = output_odp_port;
5490 emit_set_tunnel = true;
5491 }
5492 }
5493
5494 xlate_commit_actions(ctx);
5495 /* If 'emit_set_tunnel' is set, a sample(sampling_port=1) action
5496 * translates into the datapath actions set(tunnel(...)),sample(...),
5497 * which are used for sampling egress tunnel information. */
5498 if (emit_set_tunnel) {
5499 const struct xport *xport = get_ofp_port(ctx->xbridge,
5500 os->sampling_port);
5501
5502 if (xport && xport->is_tunnel) {
5503 struct flow *flow = &ctx->xin->flow;
5504 tnl_port_send(xport->ofport, flow, ctx->wc);
5505 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5506 struct flow_tnl flow_tnl = flow->tunnel;
5507 const char *tnl_type;
5508
5509 tnl_type = tnl_port_get_type(xport->ofport);
5510 commit_odp_tunnel_action(flow, &ctx->base_flow,
5511 ctx->odp_actions, tnl_type);
5512 flow->tunnel = flow_tnl;
5513 }
5514 } else {
5515 xlate_report_error(ctx,
5516 "sampling_port:%d should be a tunnel port.",
5517 os->sampling_port);
5518 }
5519 }
5520
5521 struct user_action_cookie cookie = {
5522 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
5523 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
5524 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5525 .flow_sample = {
5526 .probability = os->probability,
5527 .collector_set_id = os->collector_set_id,
5528 .obs_domain_id = os->obs_domain_id,
5529 .obs_point_id = os->obs_point_id,
5530 .output_odp_port = output_odp_port,
5531 .direction = os->direction,
5532 }
5533 };
5534 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
5535 }
5536
5537 /* Determines whether a datapath action translated from an OpenFlow action
5538 * can be reversed by another datapath action.
5539 *
5540 * OpenFlow actions that do not emit datapath actions are trivially
5541 * reversible. Reversibility of other actions depends on the nature of the
5542 * action and its translation. */
5543 static bool
5544 reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
5545 {
5546 const struct ofpact *a;
5547
5548 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5549 switch (a->type) {
5550 case OFPACT_BUNDLE:
5551 case OFPACT_CLEAR_ACTIONS:
5552 case OFPACT_CLONE:
5553 case OFPACT_CONJUNCTION:
5554 case OFPACT_CONTROLLER:
5555 case OFPACT_CT_CLEAR:
5556 case OFPACT_DEBUG_RECIRC:
5557 case OFPACT_DEBUG_SLOW:
5558 case OFPACT_DEC_MPLS_TTL:
5559 case OFPACT_DEC_TTL:
5560 case OFPACT_ENQUEUE:
5561 case OFPACT_EXIT:
5562 case OFPACT_FIN_TIMEOUT:
5563 case OFPACT_GOTO_TABLE:
5564 case OFPACT_GROUP:
5565 case OFPACT_LEARN:
5566 case OFPACT_MULTIPATH:
5567 case OFPACT_NOTE:
5568 case OFPACT_OUTPUT:
5569 case OFPACT_OUTPUT_REG:
5570 case OFPACT_POP_MPLS:
5571 case OFPACT_POP_QUEUE:
5572 case OFPACT_PUSH_MPLS:
5573 case OFPACT_PUSH_VLAN:
5574 case OFPACT_REG_MOVE:
5575 case OFPACT_RESUBMIT:
5576 case OFPACT_SAMPLE:
5577 case OFPACT_SET_ETH_DST:
5578 case OFPACT_SET_ETH_SRC:
5579 case OFPACT_SET_FIELD:
5580 case OFPACT_SET_IP_DSCP:
5581 case OFPACT_SET_IP_ECN:
5582 case OFPACT_SET_IP_TTL:
5583 case OFPACT_SET_IPV4_DST:
5584 case OFPACT_SET_IPV4_SRC:
5585 case OFPACT_SET_L4_DST_PORT:
5586 case OFPACT_SET_L4_SRC_PORT:
5587 case OFPACT_SET_MPLS_LABEL:
5588 case OFPACT_SET_MPLS_TC:
5589 case OFPACT_SET_MPLS_TTL:
5590 case OFPACT_SET_QUEUE:
5591 case OFPACT_SET_TUNNEL:
5592 case OFPACT_SET_VLAN_PCP:
5593 case OFPACT_SET_VLAN_VID:
5594 case OFPACT_STACK_POP:
5595 case OFPACT_STACK_PUSH:
5596 case OFPACT_STRIP_VLAN:
5597 case OFPACT_UNROLL_XLATE:
5598 case OFPACT_WRITE_ACTIONS:
5599 case OFPACT_WRITE_METADATA:
5600 case OFPACT_CHECK_PKT_LARGER:
5601 break;
5602
5603 case OFPACT_CT:
5604 case OFPACT_METER:
5605 case OFPACT_NAT:
5606 case OFPACT_OUTPUT_TRUNC:
5607 case OFPACT_ENCAP:
5608 case OFPACT_DECAP:
5609 case OFPACT_DEC_NSH_TTL:
5610 return false;
5611 }
5612 }
5613 return true;
5614 }
5615
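/* Summary of the function below, inferred from its body: translates
 * 'actions' in such a way that any side effects they have on the translation
 * state do not leak into the actions that follow.  Reversible action lists
 * (and the last action of a translation) are translated in place; otherwise
 * the result is wrapped in a datapath clone action, or in a 100% probability
 * sample action when the datapath lacks clone support. */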
5616 static void
5617 clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
5618 struct xlate_ctx *ctx, bool is_last_action,
5619 bool group_bucket_action OVS_UNUSED)
5620 {
5621 struct ofpbuf old_stack = ctx->stack;
5622 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5623 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5624 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5625
5626 struct ofpbuf old_action_set = ctx->action_set;
5627 uint64_t actset_stub[1024 / 8];
5628 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5629 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5630
5631 size_t offset, ac_offset;
5632 struct flow old_flow = ctx->xin->flow;
5633
5634 if (reversible_actions(actions, actions_len) || is_last_action) {
5635 old_flow = ctx->xin->flow;
5636 do_xlate_actions(actions, actions_len, ctx, is_last_action, false);
5637 if (!ctx->freezing) {
5638 xlate_action_set(ctx);
5639 }
5640 if (ctx->freezing) {
5641 finish_freezing(ctx);
5642 }
5643 goto xlate_done;
5644 }
5645
5646 /* Commit datapath actions before emitting the clone action to
5647 * avoid emitting those actions twice: once inside
5648 * the clone and again for the actions after the clone. */
5649 xlate_commit_actions(ctx);
5650 struct flow old_base = ctx->base_flow;
5651 bool old_was_mpls = ctx->was_mpls;
5652 bool old_conntracked = ctx->conntracked;
5653
5654 /* The actions are not reversible, so a datapath clone action is
5655 * required to encode the translation. Select the clone action
5656 * based on datapath capabilities. */
5657 if (ctx->xbridge->support.clone) {
5658 /* Use the clone action as the datapath clone. */
5659 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
5660 do_xlate_actions(actions, actions_len, ctx, true, false);
5661 if (!ctx->freezing) {
5662 xlate_action_set(ctx);
5663 }
5664 if (ctx->freezing) {
5665 finish_freezing(ctx);
5666 }
5667 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5668 goto dp_clone_done;
5669 }
5670
5671 if (ctx->xbridge->support.sample_nesting > 3) {
5672 /* Use sample action as datapath clone. */
5673 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5674 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5675 OVS_SAMPLE_ATTR_ACTIONS);
5676 do_xlate_actions(actions, actions_len, ctx, true, false);
5677 if (!ctx->freezing) {
5678 xlate_action_set(ctx);
5679 }
5680 if (ctx->freezing) {
5681 finish_freezing(ctx);
5682 }
5683 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5684 nl_msg_cancel_nested(ctx->odp_actions, offset);
5685 } else {
5686 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5687 UINT32_MAX); /* 100% probability. */
5688 nl_msg_end_nested(ctx->odp_actions, offset);
5689 }
5690 goto dp_clone_done;
5691 }
5692
5693 /* The datapath does not support clone; skip translating the actions
5694 * and report an error. */
5695 xlate_report_error(ctx, "Failed to compose clone action");
5696
5697 dp_clone_done:
5698 /* The clone's conntrack execution should have no effect on the original
5699 * packet. */
5700 ctx->conntracked = old_conntracked;
5701
5702 /* Popping MPLS from the clone should have no effect on the original
5703 * packet. */
5704 ctx->was_mpls = old_was_mpls;
5705
5706 /* Restore the 'base_flow' for the next action. */
5707 ctx->base_flow = old_base;
5708
5709 xlate_done:
5710 ofpbuf_uninit(&ctx->action_set);
5711 ctx->action_set = old_action_set;
5712 ofpbuf_uninit(&ctx->stack);
5713 ctx->stack = old_stack;
5714 ctx->xin->flow = old_flow;
5715 }
5716
5717 static void
5718 compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5719 bool is_last_action)
5720 {
5721 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5722
5723 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action,
5724 false);
5725 }
5726
5727 static void
5728 xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5729 {
5730 if (meter->provider_meter_id != UINT32_MAX) {
5731 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5732 meter->provider_meter_id);
5733 }
5734 }
5735
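/* Returns true if the bridge may process a packet received on 'xport',
 * based on the port's OFPUTIL_PC_NO_RECV/OFPUTIL_PC_NO_RECV_STP
 * configuration and its STP/RSTP forwarding and learning state. */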
5736 static bool
5737 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
5738 {
5739 if (xport->config & (is_stp(&ctx->xin->flow)
5740 ? OFPUTIL_PC_NO_RECV_STP
5741 : OFPUTIL_PC_NO_RECV)) {
5742 return false;
5743 }
5744
5745 /* Only drop packets here if both forwarding and learning are
5746 * disabled. If just learning is enabled, we need to let
5747 * OFPP_NORMAL and the learning action take a look at the packet
5748 * before we can drop it. */
5749 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5750 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
5751 return false;
5752 }
5753
5754 return true;
5755 }
5756
5757 static void
5758 xlate_write_actions__(struct xlate_ctx *ctx,
5759 const struct ofpact *ofpacts, size_t ofpacts_len)
5760 {
5761 /* Maintain actset_output depending on the contents of the action set:
5762 *
5763 * - OFPP_UNSET, if there is no "output" action.
5764 *
5765 * - The output port, if there is an "output" action and no "group"
5766 * action.
5767 *
5768 * - OFPP_UNSET, if there is a "group" action.
5769 */
5770 if (!ctx->action_set_has_group) {
5771 const struct ofpact *a;
5772 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5773 if (a->type == OFPACT_OUTPUT) {
5774 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5775 } else if (a->type == OFPACT_GROUP) {
5776 ctx->xin->flow.actset_output = OFPP_UNSET;
5777 ctx->action_set_has_group = true;
5778 break;
5779 }
5780 }
5781 }
5782
5783 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5784 }
5785
5786 static void
5787 xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5788 {
5789 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
5790 }
5791
5792 static void
5793 xlate_action_set(struct xlate_ctx *ctx)
5794 {
5795 uint64_t action_list_stub[1024 / 8];
5796 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
5797 ofpacts_execute_action_set(&action_list, &ctx->action_set);
5798 /* Clear the action set, as it is not needed any more. */
5799 ofpbuf_clear(&ctx->action_set);
5800 if (action_list.size) {
5801 ctx->in_action_set = true;
5802
5803 struct ovs_list *old_trace = ctx->xin->trace;
5804 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5805 "--. Executing action set:");
5806 do_xlate_actions(action_list.data, action_list.size, ctx, true, false);
5807 ctx->xin->trace = old_trace;
5808
5809 ctx->in_action_set = false;
5810 }
5811 ofpbuf_uninit(&action_list);
5812 }
5813
5814 static void
5815 freeze_put_unroll_xlate(struct xlate_ctx *ctx)
5816 {
5817 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
5818
5819 /* Restore the table_id and rule cookie for a potential PACKET
5820 * IN if needed. */
5821 if (!unroll ||
5822 (ctx->table_id != unroll->rule_table_id
5823 || ctx->rule_cookie != unroll->rule_cookie)) {
5824 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
5825 unroll->rule_table_id = ctx->table_id;
5826 unroll->rule_cookie = ctx->rule_cookie;
5827 ctx->frozen_actions.header = unroll;
5828 }
5829 }
5830
5831
5832 /* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5833 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5834 * present, before any action that may depend on the current table ID or flow
5835 * cookie. */
5836 static void
5837 freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
5838 struct xlate_ctx *ctx)
5839 {
5840 for (; a < end; a = ofpact_next(a)) {
5841 switch (a->type) {
5842 case OFPACT_OUTPUT_REG:
5843 case OFPACT_OUTPUT_TRUNC:
5844 case OFPACT_GROUP:
5845 case OFPACT_OUTPUT:
5846 case OFPACT_CONTROLLER:
5847 case OFPACT_DEC_MPLS_TTL:
5848 case OFPACT_DEC_NSH_TTL:
5849 case OFPACT_DEC_TTL:
5850 /* These actions may generate asynchronous messages, which include
5851 * table ID and flow cookie information. */
5852 freeze_put_unroll_xlate(ctx);
5853 break;
5854
5855 case OFPACT_RESUBMIT:
5856 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5857 /* This resubmit action is relative to the current table, so we
5858 * need to track what table that is. */
5859 freeze_put_unroll_xlate(ctx);
5860 }
5861 break;
5862
5863 case OFPACT_SET_TUNNEL:
5864 case OFPACT_REG_MOVE:
5865 case OFPACT_SET_FIELD:
5866 case OFPACT_STACK_PUSH:
5867 case OFPACT_STACK_POP:
5868 case OFPACT_LEARN:
5869 case OFPACT_WRITE_METADATA:
5870 case OFPACT_GOTO_TABLE:
5871 case OFPACT_ENQUEUE:
5872 case OFPACT_SET_VLAN_VID:
5873 case OFPACT_SET_VLAN_PCP:
5874 case OFPACT_STRIP_VLAN:
5875 case OFPACT_PUSH_VLAN:
5876 case OFPACT_SET_ETH_SRC:
5877 case OFPACT_SET_ETH_DST:
5878 case OFPACT_SET_IPV4_SRC:
5879 case OFPACT_SET_IPV4_DST:
5880 case OFPACT_SET_IP_DSCP:
5881 case OFPACT_SET_IP_ECN:
5882 case OFPACT_SET_IP_TTL:
5883 case OFPACT_SET_L4_SRC_PORT:
5884 case OFPACT_SET_L4_DST_PORT:
5885 case OFPACT_SET_QUEUE:
5886 case OFPACT_POP_QUEUE:
5887 case OFPACT_PUSH_MPLS:
5888 case OFPACT_POP_MPLS:
5889 case OFPACT_SET_MPLS_LABEL:
5890 case OFPACT_SET_MPLS_TC:
5891 case OFPACT_SET_MPLS_TTL:
5892 case OFPACT_MULTIPATH:
5893 case OFPACT_BUNDLE:
5894 case OFPACT_EXIT:
5895 case OFPACT_UNROLL_XLATE:
5896 case OFPACT_FIN_TIMEOUT:
5897 case OFPACT_CLEAR_ACTIONS:
5898 case OFPACT_WRITE_ACTIONS:
5899 case OFPACT_METER:
5900 case OFPACT_SAMPLE:
5901 case OFPACT_CLONE:
5902 case OFPACT_ENCAP:
5903 case OFPACT_DECAP:
5904 case OFPACT_DEBUG_RECIRC:
5905 case OFPACT_DEBUG_SLOW:
5906 case OFPACT_CT:
5907 case OFPACT_CT_CLEAR:
5908 case OFPACT_NAT:
5909 case OFPACT_CHECK_PKT_LARGER:
5910 /* These may not generate PACKET INs. */
5911 break;
5912
5913 case OFPACT_NOTE:
5914 case OFPACT_CONJUNCTION:
5915 /* These need not be copied for restoration. */
5916 continue;
5917 }
5918 /* Copy the action over. */
5919 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
5920 }
5921 }
5922
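/* Appends an OVS_CT_ATTR_MARK attribute carrying the ct_mark value and mask
 * to 'odp_actions' if the nested ct actions set any ct_mark bits, as
 * recorded in 'wc'. */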
5923 static void
5924 put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5925 struct flow_wildcards *wc)
5926 {
5927 if (wc->masks.ct_mark) {
5928 struct {
5929 uint32_t key;
5930 uint32_t mask;
5931 } *odp_ct_mark;
5932
5933 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5934 sizeof(*odp_ct_mark));
5935 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5936 odp_ct_mark->mask = wc->masks.ct_mark;
5937 }
5938 }
5939
5940 static void
5941 put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5942 struct flow_wildcards *wc)
5943 {
5944 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
5945 struct {
5946 ovs_u128 key;
5947 ovs_u128 mask;
5948 } odp_ct_label;
5949
5950 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
5951 odp_ct_label.mask = wc->masks.ct_label;
5952 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
5953 &odp_ct_label, sizeof odp_ct_label);
5954 }
5955 }
5956
5957 static void
5958 put_ct_helper(struct xlate_ctx *ctx,
5959 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
5960 {
5961 if (ofc->alg) {
5962 switch(ofc->alg) {
5963 case IPPORT_FTP:
5964 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
5965 break;
5966 case IPPORT_TFTP:
5967 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5968 break;
5969 default:
5970 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
5971 break;
5972 }
5973 }
5974 }
5975
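/* Summary of the function below, inferred from its body: encodes the NAT
 * parameters stored in ctx->ct_nat_action, if any, as a nested
 * OVS_CT_ATTR_NAT attribute: the SNAT/DNAT flag, persistence and
 * port-selection flags, and the optional address and protocol port ranges. */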
5976 static void
5977 put_ct_nat(struct xlate_ctx *ctx)
5978 {
5979 struct ofpact_nat *ofn = ctx->ct_nat_action;
5980 size_t nat_offset;
5981
5982 if (!ofn) {
5983 return;
5984 }
5985
5986 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5987 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5988 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5989 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5990 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5991 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5992 }
5993 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5994 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5995 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5996 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5997 }
5998 if (ofn->range_af == AF_INET) {
5999 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
6000 ofn->range.addr.ipv4.min);
6001 if (ofn->range.addr.ipv4.max &&
6002 (ntohl(ofn->range.addr.ipv4.max)
6003 > ntohl(ofn->range.addr.ipv4.min))) {
6004 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6005 ofn->range.addr.ipv4.max);
6006 }
6007 } else if (ofn->range_af == AF_INET6) {
6008 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
6009 &ofn->range.addr.ipv6.min,
6010 sizeof ofn->range.addr.ipv6.min);
6011 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
6012 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
6013 sizeof ofn->range.addr.ipv6.max) > 0) {
6014 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6015 &ofn->range.addr.ipv6.max,
6016 sizeof ofn->range.addr.ipv6.max);
6017 }
6018 }
6019 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
6020 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
6021 ofn->range.proto.min);
6022 if (ofn->range.proto.max &&
6023 ofn->range.proto.max > ofn->range.proto.min) {
6024 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
6025 ofn->range.proto.max);
6026 }
6027 }
6028 }
6029 nl_msg_end_nested(ctx->odp_actions, nat_offset);
6030 }
6031
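/* Summary of the function below, inferred from its body: translates the
 * "ct" action 'ofc' into a nested OVS_ACTION_ATTR_CT datapath action.
 * Commits any pending actions, translates the nested actions to collect
 * ct_mark, ct_label, helper and NAT parameters, encodes the zone, and, if a
 * recirculation table is given, freezes the translation so that processing
 * resumes in that table after conntrack. */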
6032 static void
6033 compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
6034 bool is_last_action)
6035 {
6036 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
6037 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
6038 size_t ct_offset;
6039 uint16_t zone;
6040
6041 /* Ensure that any prior actions are applied before composing the new
6042 * conntrack action. */
6043 xlate_commit_actions(ctx);
6044
6045 /* Process nested actions first, to populate the key. */
6046 ctx->ct_nat_action = NULL;
6047 ctx->wc->masks.ct_mark = 0;
6048 ctx->wc->masks.ct_label = OVS_U128_ZERO;
6049 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
6050 is_last_action, false);
6051
6052 if (ofc->zone_src.field) {
6053 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
6054 } else {
6055 zone = ofc->zone_imm;
6056 }
6057
6058 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
6059 if (ofc->flags & NX_CT_F_COMMIT) {
6060 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
6061 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
6062 if (ctx->xbridge->support.ct_eventmask) {
6063 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
6064 OVS_CT_EVENTMASK_DEFAULT);
6065 }
6066 }
6067 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
6068 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
6069 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
6070 put_ct_helper(ctx, ctx->odp_actions, ofc);
6071 put_ct_nat(ctx);
6072 ctx->ct_nat_action = NULL;
6073 nl_msg_end_nested(ctx->odp_actions, ct_offset);
6074
6075 ctx->wc->masks.ct_mark = old_ct_mark_mask;
6076 ctx->wc->masks.ct_label = old_ct_label_mask;
6077
6078 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
6079 ctx->conntracked = true;
6080 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
6081 }
6082
6083 /* The ct_* fields are only available in the scope of the 'recirc_table'
6084 * call chain. */
6085 flow_clear_conntrack(&ctx->xin->flow);
6086 xlate_report(ctx, OFT_DETAIL, "Sets the packet to an untracked state, "
6087 "and clears all the conntrack fields.");
6088 ctx->conntracked = false;
6089 }
6090
6091 static void
6092 compose_ct_clear_action(struct xlate_ctx *ctx)
6093 {
6094 clear_conntrack(ctx);
6095 /* This action originally existed without dpif support. So to preserve
6096 * compatibility, only append it if the dpif supports it. */
6097 if (ctx->xbridge->support.ct_clear) {
6098 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
6099 }
6100 }
6101
6102 /* The check_pkt_larger action checks the packet length and stores the
6103 * result in the destination register bit. We translate this action to the
6104 * datapath action 'check_pkt_len', whose format is
6105 * 'check_pkt_len(pkt_len, ge(actions), le(actions))'.
6106 *
6107 * We first set the destination register bit to 1 and call
6108 * 'do_xlate_actions' for the case where the packet length is greater than
6109 * the specified packet length.
6110 *
6111 * We then set the destination register bit to 0 and call
6112 * 'do_xlate_actions' for the case where the packet length is less than or
6113 * equal to the specified packet length.
6114 *
6115 * It is possible for freezing to happen in both cases.
6116 */
6117 static void
6118 xlate_check_pkt_larger(struct xlate_ctx *ctx,
6119 struct ofpact_check_pkt_larger *check_pkt_larger,
6120 const struct ofpact *remaining_acts,
6121 size_t remaining_acts_len)
6122 {
6123 union mf_subvalue value;
6124 memset(&value, 0, sizeof value);
6125 if (!ctx->xbridge->support.check_pkt_len) {
6126 uint8_t is_pkt_larger = 0;
6127 if (ctx->xin->packet) {
6128 is_pkt_larger =
6129 dp_packet_size(ctx->xin->packet) > check_pkt_larger->pkt_len;
6130 }
6131 value.u8_val = is_pkt_larger;
6132 mf_write_subfield_flow(&check_pkt_larger->dst, &value,
6133 &ctx->xin->flow);
6134 /* If the datapath doesn't support the check_pkt_len action, then set
6135 * the SLOW_ACTION flag. If we don't set SLOW_ACTION, we
6136 * will push a flow to the datapath based on the packet length
6137 * in ctx->xin->packet. For subsequent packets that match the
6138 * same flow, the datapath will apply the actions without considering
6139 * the packet length. This results in wrong actions being applied.
6140 */
6141 ctx->xout->slow |= SLOW_ACTION;
6142 return;
6143 }
6144
6145 struct ofpbuf old_stack = ctx->stack;
6146 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
6147 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
6148 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
6149
6150 struct ofpbuf old_action_set = ctx->action_set;
6151 uint64_t actset_stub[1024 / 8];
6152 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
6153 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
6154
6155 struct flow old_flow = ctx->xin->flow;
6156 xlate_commit_actions(ctx);
6157 struct flow old_base = ctx->base_flow;
6158 bool old_was_mpls = ctx->was_mpls;
6159 bool old_conntracked = ctx->conntracked;
6160
6161 size_t offset = nl_msg_start_nested(ctx->odp_actions,
6162 OVS_ACTION_ATTR_CHECK_PKT_LEN);
6163 nl_msg_put_u16(ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN,
6164 check_pkt_larger->pkt_len);
6165 size_t offset_attr = nl_msg_start_nested(
6166 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
6167 value.u8_val = 1;
6168 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6169 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6170 if (!ctx->freezing) {
6171 xlate_action_set(ctx);
6172 }
6173 if (ctx->freezing) {
6174 finish_freezing(ctx);
6175 }
6176 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6177
6178 ctx->base_flow = old_base;
6179 ctx->was_mpls = old_was_mpls;
6180 ctx->conntracked = old_conntracked;
6181 ctx->xin->flow = old_flow;
6182
6183 /* If the flow translation for the IF_GREATER case requires freezing,
6184 * then ctx->exit would be true. Reset it to false so that we can
6185 * do the flow translation for the 'IF_LESS_EQUAL' case; finish_freezing()
6186 * would have taken care of undoing the changes done for the freeze. */
6187 ctx->exit = false;
6188
6189 offset_attr = nl_msg_start_nested(
6190 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
6191 value.u8_val = 0;
6192 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6193 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6194 if (!ctx->freezing) {
6195 xlate_action_set(ctx);
6196 }
6197 if (ctx->freezing) {
6198 finish_freezing(ctx);
6199 }
6200 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6201 nl_msg_end_nested(ctx->odp_actions, offset);
6202
6203 ofpbuf_uninit(&ctx->action_set);
6204 ctx->action_set = old_action_set;
6205 ofpbuf_uninit(&ctx->stack);
6206 ctx->stack = old_stack;
6207 ctx->base_flow = old_base;
6208 ctx->was_mpls = old_was_mpls;
6209 ctx->conntracked = old_conntracked;
6210 ctx->xin->flow = old_flow;
6211 ctx->exit = true;
6212 }
6213
6214 static void
6215 rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
6216 struct flow *flow,
6217 struct flow_wildcards *wc)
6218 {
6219 wc->masks.packet_type = OVS_BE32_MAX;
6220 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
6221 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
6222 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
6223 flow->packet_type = htonl(PT_ETH);
6224 flow->dl_src = eth_addr_zero;
6225 flow->dl_dst = eth_addr_zero;
6226 flow->dl_type = ethertype;
6227 } else {
6228 /* Error handling: drop packet. */
6229 xlate_report_debug(ctx, OFT_ACTION,
6230 "Dropping packet as encap(ethernet) is not "
6231 "supported for packet type ethernet.");
6232 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6233 }
6234 }
6235
6236 /* For an MD2 NSH header, returns a pointer to an ofpbuf containing the
6237 * encoded MD2 TLVs that were provided as encap properties to the encap
6238 * operation. This will be stored as encap_data in the ctx and copied into
6239 * the push_nsh action at the next commit. */
6240 static struct ofpbuf *
6241 rewrite_flow_push_nsh(struct xlate_ctx *ctx,
6242 const struct ofpact_encap *encap,
6243 struct flow *flow,
6244 struct flow_wildcards *wc)
6245 {
6246 ovs_be32 packet_type = flow->packet_type;
6247 const char *ptr = (char *) encap->props;
6248 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
6249 uint8_t md_type = NSH_M_TYPE1;
6250 uint8_t np = 0;
6251 int i;
6252
6253 /* Scan the optional NSH encap TLV properties, if any. */
6254 for (i = 0; i < encap->n_props; i++) {
6255 struct ofpact_ed_prop *prop_ptr =
6256 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
6257 if (prop_ptr->prop_class == OFPPPC_NSH) {
6258 switch (prop_ptr->type) {
6259 case OFPPPT_PROP_NSH_MDTYPE: {
6260 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
6261 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
6262 prop_ptr);
6263 md_type = prop_md_type->md_type;
6264 break;
6265 }
6266 case OFPPPT_PROP_NSH_TLV: {
6267 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
6268 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
6269 prop_ptr);
6270 struct nsh_md2_tlv *md2_ctx =
6271 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
6272 md2_ctx->md_class = tlv_prop->tlv_class;
6273 md2_ctx->type = tlv_prop->tlv_type;
6274 md2_ctx->length = tlv_prop->tlv_len;
6275 size_t len = ROUND_UP(md2_ctx->length, 4);
6276 size_t padding = len - md2_ctx->length;
6277 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
6278 ofpbuf_put_zeros(buf, padding);
6279 break;
6280 }
6281 default:
6282 /* No other NSH encap properties defined yet. */
6283 break;
6284 }
6285 }
6286 ptr += ROUND_UP(prop_ptr->len, 8);
6287 }
6288 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
6289 ofpbuf_delete(buf);
6290 buf = NULL;
6291 }
6292
6293 /* Determine the Next Protocol field for NSH header. */
6294 switch (ntohl(packet_type)) {
6295 case PT_ETH:
6296 np = NSH_P_ETHERNET;
6297 break;
6298 case PT_IPV4:
6299 np = NSH_P_IPV4;
6300 break;
6301 case PT_IPV6:
6302 np = NSH_P_IPV6;
6303 break;
6304 case PT_NSH:
6305 np = NSH_P_NSH;
6306 break;
6307 default:
6308 /* Error handling: drop packet. */
6309 xlate_report_debug(ctx, OFT_ACTION,
6310 "Dropping packet as encap(nsh) is not "
6311 "supported for packet type (%d,0x%x)",
6312 pt_ns(packet_type), pt_ns_type(packet_type));
6313 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6314 return buf;
6315 }
6316 /* Note that we have matched on packet_type! */
6317 wc->masks.packet_type = OVS_BE32_MAX;
6318
6319 /* Reset all current flow packet headers. */
6320 memset(&flow->dl_dst, 0,
6321 sizeof(struct flow) - offsetof(struct flow, dl_dst));
6322
6323 /* Populate the flow with the new NSH header. */
6324 flow->packet_type = htonl(PT_NSH);
6325 flow->dl_type = htons(ETH_TYPE_NSH);
6326 flow->nsh.flags = 0;
6327 flow->nsh.ttl = 63;
6328 flow->nsh.np = np;
6329 flow->nsh.path_hdr = htonl(255);
6330
6331 if (md_type == NSH_M_TYPE1) {
6332 flow->nsh.mdtype = NSH_M_TYPE1;
6333 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
6334 if (buf) {
6335 /* Drop any MD2 context TLVs. */
6336 ofpbuf_delete(buf);
6337 buf = NULL;
6338 }
6339 } else if (md_type == NSH_M_TYPE2) {
6340 flow->nsh.mdtype = NSH_M_TYPE2;
6341 }
6342 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
6343
6344 return buf;
6345 }
6346
6347 static void
6348 xlate_generic_encap_action(struct xlate_ctx *ctx,
6349 const struct ofpact_encap *encap)
6350 {
6351 struct flow *flow = &ctx->xin->flow;
6352 struct flow_wildcards *wc = ctx->wc;
6353 struct ofpbuf *encap_data = NULL;
6354
6355 /* Ensure that any pending actions on the inner packet are applied before
6356 * rewriting the flow. */
6357 xlate_commit_actions(ctx);
6358
6359 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
6360 switch (ntohl(encap->new_pkt_type)) {
6361 case PT_ETH:
6362 rewrite_flow_encap_ethernet(ctx, flow, wc);
6363 break;
6364 case PT_NSH:
6365 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
6366 break;
6367 default:
6368 /* New packet type was checked during decoding. */
6369 OVS_NOT_REACHED();
6370 }
6371
6372 if (!ctx->error) {
6373 /* The actual encap datapath action will be generated at the next commit. */
6374 ctx->pending_encap = true;
6375 ctx->encap_data = encap_data;
6376 }
6377 }
6378
6379 /* Returns true if packet must be recirculated after decapsulation. */
6380 static bool
6381 xlate_generic_decap_action(struct xlate_ctx *ctx,
6382 const struct ofpact_decap *decap OVS_UNUSED)
6383 {
6384 struct flow *flow = &ctx->xin->flow;
6385
6386 /* Ensure that any pending actions on the current packet are applied
6387 * before generating the decap action. */
6388 xlate_commit_actions(ctx);
6389
6390 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
6391 switch (ntohl(flow->packet_type)) {
6392 case PT_ETH:
6393 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
6394 /* Error handling: drop packet. */
6395 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
6396 "decap Ethernet if VLAN is present.");
6397 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6398 } else {
6399 /* Just change the packet_type.
6400 * Delay generating pop_eth to the next commit. */
6401 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
6402 ntohs(flow->dl_type)));
6403 ctx->wc->masks.dl_type = OVS_BE16_MAX;
6404 }
6405 return false;
6406 case PT_NSH:
6407 /* The pop_nsh action is generated at the commit executed as
6408 * part of freezing the ctx for recirculation. Here we just set
6409 * the new packet type based on the NSH next protocol field. */
6410 switch (flow->nsh.np) {
6411 case NSH_P_ETHERNET:
6412 flow->packet_type = htonl(PT_ETH);
6413 break;
6414 case NSH_P_IPV4:
6415 flow->packet_type = htonl(PT_IPV4);
6416 break;
6417 case NSH_P_IPV6:
6418 flow->packet_type = htonl(PT_IPV6);
6419 break;
6420 case NSH_P_NSH:
6421 flow->packet_type = htonl(PT_NSH);
6422 break;
6423 default:
6424 /* Error handling: drop packet. */
6425 xlate_report_debug(ctx, OFT_ACTION,
6426 "Dropping packet as NSH next protocol %d "
6427 "is not supported", flow->nsh.np);
6428 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6429 return false;
6431 }
6432 ctx->wc->masks.nsh.np = UINT8_MAX;
6433 ctx->pending_decap = true;
6434 /* Trigger recirculation. */
6435 return true;
6436 default:
6437 /* Error handling: drop packet. */
6438 xlate_report_debug(
6439 ctx, OFT_ACTION,
6440 "Dropping packet as the decap() does not support "
6441 "packet type (%d,0x%x)",
6442 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
6443 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6444 return false;
6445 }
6446 }
6447
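/* Summary of the function below, inferred from its body: triggers freezing
 * (and thus recirculation) before translating 'a' if the packet was MPLS
 * earlier in this translation and 'a' may need to look at L3+ fields that
 * were hidden behind the MPLS label stack.  Output actions and set actions
 * that only touch L2 fields are exempted to avoid unnecessary
 * recirculation. */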
6448 static void
6449 recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
6450 {
6451 /* No need to recirculate if already exiting. */
6452 if (ctx->exit) {
6453 return;
6454 }
6455
6456 /* Do not consider recirculating unless the packet was previously MPLS. */
6457 if (!ctx->was_mpls) {
6458 return;
6459 }
6460
6461 /* Special case these actions, only recirculating if necessary.
6462 * This avoids the overhead of recirculation in common use-cases.
6463 */
6464 switch (a->type) {
6465
6466 /* Output actions do not require recirculation. */
6467 case OFPACT_OUTPUT:
6468 case OFPACT_OUTPUT_TRUNC:
6469 case OFPACT_ENQUEUE:
6470 case OFPACT_OUTPUT_REG:
6471 /* Set actions that don't touch L3+ fields do not require recirculation. */
6472 case OFPACT_SET_VLAN_VID:
6473 case OFPACT_SET_VLAN_PCP:
6474 case OFPACT_SET_ETH_SRC:
6475 case OFPACT_SET_ETH_DST:
6476 case OFPACT_SET_TUNNEL:
6477 case OFPACT_SET_QUEUE:
6478 /* If the actions of a group require recirculation, that can be detected
6479 * when translating them. */
6480 case OFPACT_GROUP:
6481 return;
6482
6483 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
6484 case OFPACT_SET_FIELD:
6485 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6486 break;
6487 }
6488 return;
6489
6490 /* For simplicity, recirculate in all other cases. */
6491 case OFPACT_CONTROLLER:
6492 case OFPACT_BUNDLE:
6493 case OFPACT_STRIP_VLAN:
6494 case OFPACT_PUSH_VLAN:
6495 case OFPACT_SET_IPV4_SRC:
6496 case OFPACT_SET_IPV4_DST:
6497 case OFPACT_SET_IP_DSCP:
6498 case OFPACT_SET_IP_ECN:
6499 case OFPACT_SET_IP_TTL:
6500 case OFPACT_SET_L4_SRC_PORT:
6501 case OFPACT_SET_L4_DST_PORT:
6502 case OFPACT_REG_MOVE:
6503 case OFPACT_STACK_PUSH:
6504 case OFPACT_STACK_POP:
6505 case OFPACT_DEC_TTL:
6506 case OFPACT_SET_MPLS_LABEL:
6507 case OFPACT_SET_MPLS_TC:
6508 case OFPACT_SET_MPLS_TTL:
6509 case OFPACT_DEC_MPLS_TTL:
6510 case OFPACT_PUSH_MPLS:
6511 case OFPACT_POP_MPLS:
6512 case OFPACT_POP_QUEUE:
6513 case OFPACT_FIN_TIMEOUT:
6514 case OFPACT_RESUBMIT:
6515 case OFPACT_LEARN:
6516 case OFPACT_CONJUNCTION:
6517 case OFPACT_MULTIPATH:
6518 case OFPACT_NOTE:
6519 case OFPACT_EXIT:
6520 case OFPACT_SAMPLE:
6521 case OFPACT_CLONE:
6522 case OFPACT_ENCAP:
6523 case OFPACT_DECAP:
6524 case OFPACT_DEC_NSH_TTL:
6525 case OFPACT_UNROLL_XLATE:
6526 case OFPACT_CT:
6527 case OFPACT_CT_CLEAR:
6528 case OFPACT_NAT:
6529 case OFPACT_DEBUG_RECIRC:
6530 case OFPACT_DEBUG_SLOW:
6531 case OFPACT_METER:
6532 case OFPACT_CLEAR_ACTIONS:
6533 case OFPACT_WRITE_ACTIONS:
6534 case OFPACT_WRITE_METADATA:
6535 case OFPACT_GOTO_TABLE:
6536 case OFPACT_CHECK_PKT_LARGER:
6537 default:
6538 break;
6539 }
6540
6541 /* Recirculate */
6542 ctx_trigger_freeze(ctx);
6543 }
6544
6545 static void
6546 xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6547 {
6548 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6549 xlate_report_subfield(ctx, &a->dst);
6550 }
6551
6552 static void
6553 xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6554 {
6555 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6556 xlate_report_subfield(ctx, &a->subfield);
6557 } else {
6558 xlate_report_error(ctx, "stack underflow");
6559 }
6560 }
6561
6562 /* Restore translation context data that was stored earlier. */
6563 static void
6564 xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6565 const struct ofpact_unroll_xlate *a)
6566 {
6567 ctx->table_id = a->rule_table_id;
6568 ctx->rule_cookie = a->rule_cookie;
6569 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6570 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6571 }
6572
6573 static void
6574 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6575 struct xlate_ctx *ctx, bool is_last_action,
6576 bool group_bucket_action)
6577 {
6578 struct flow_wildcards *wc = ctx->wc;
6579 struct flow *flow = &ctx->xin->flow;
6580 const struct ofpact *a;
6581
6582 /* dl_type already in the mask, not set below. */
6583
6584 if (!ofpacts_len) {
6585 xlate_report(ctx, OFT_ACTION, "drop");
6586 return;
6587 }
6588
6589 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6590 struct ofpact_controller *controller;
6591 const struct ofpact_metadata *metadata;
6592 const struct ofpact_set_field *set_field;
6593 const struct mf_field *mf;
6594 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6595 && ctx->action_set.size;
6596
6597 if (ctx->error) {
6598 break;
6599 }
6600
6601 recirc_for_mpls(a, ctx);
6602
6603 if (ctx->exit) {
6604 /* Check if we need to store the remaining actions for later
6605 * execution. */
6606 if (ctx->freezing) {
6607 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
6608 ctx);
6609 }
6610 break;
6611 }
6612
6613 if (OVS_UNLIKELY(ctx->xin->trace)) {
6614 struct ds s = DS_EMPTY_INITIALIZER;
6615 struct ofpact_format_params fp = { .s = &s };
6616 ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
6617 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6618 ds_destroy(&s);
6619 }
6620
6621 switch (a->type) {
6622 case OFPACT_OUTPUT:
6623 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
6624 ofpact_get_OUTPUT(a)->max_len, true, last,
6625 false, group_bucket_action);
6626 break;
6627
6628 case OFPACT_GROUP:
6629 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
6630 /* Group could not be found. */
6631
6632 /* XXX: Terminates action list translation, but does not
6633 * terminate the pipeline. */
6634 return;
6635 }
6636 break;
6637
6638 case OFPACT_CONTROLLER:
6639 controller = ofpact_get_CONTROLLER(a);
6640 if (controller->pause) {
6641 ctx->pause = controller;
6642 ctx_trigger_freeze(ctx);
6643 a = ofpact_next(a);
6644 } else {
6645 xlate_controller_action(ctx, controller->max_len,
6646 controller->reason,
6647 controller->controller_id,
6648 controller->provider_meter_id,
6649 controller->userdata,
6650 controller->userdata_len);
6651 }
6652 break;
6653
6654 case OFPACT_ENQUEUE:
6655 memset(&wc->masks.skb_priority, 0xff,
6656 sizeof wc->masks.skb_priority);
6657 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last,
6658 group_bucket_action);
6659 break;
6660
6661 case OFPACT_SET_VLAN_VID:
6662 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6663 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6664 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
6665 if (!flow->vlans[0].tpid) {
6666 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6667 }
6668 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6669 flow->vlans[0].tci |=
6670 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6671 htons(VLAN_CFI));
6672 }
6673 break;
6674
6675 case OFPACT_SET_VLAN_PCP:
6676 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6677 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6678 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
6679 if (!flow->vlans[0].tpid) {
6680 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6681 }
6682 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6683 flow->vlans[0].tci |=
6684 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6685 << VLAN_PCP_SHIFT) | VLAN_CFI);
6686 }
6687 break;
6688
6689 case OFPACT_STRIP_VLAN:
6690 flow_pop_vlan(flow, wc);
6691 break;
6692
6693 case OFPACT_PUSH_VLAN:
6694 flow_push_vlan_uninit(flow, wc);
6695 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6696 flow->vlans[0].tci = htons(VLAN_CFI);
6697 break;
6698
6699 case OFPACT_SET_ETH_SRC:
6700 WC_MASK_FIELD(wc, dl_src);
6701 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
6702 break;
6703
6704 case OFPACT_SET_ETH_DST:
6705 WC_MASK_FIELD(wc, dl_dst);
6706 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
6707 break;
6708
6709 case OFPACT_SET_IPV4_SRC:
6710 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6711 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
6712 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6713 }
6714 break;
6715
6716 case OFPACT_SET_IPV4_DST:
6717 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6718 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
6719 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6720 }
6721 break;
6722
6723 case OFPACT_SET_IP_DSCP:
6724 if (is_ip_any(flow)) {
6725 wc->masks.nw_tos |= IP_DSCP_MASK;
6726 flow->nw_tos &= ~IP_DSCP_MASK;
6727 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
6728 }
6729 break;
6730
6731 case OFPACT_SET_IP_ECN:
6732 if (is_ip_any(flow)) {
6733 wc->masks.nw_tos |= IP_ECN_MASK;
6734 flow->nw_tos &= ~IP_ECN_MASK;
6735 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6736 }
6737 break;
6738
6739 case OFPACT_SET_IP_TTL:
6740 if (is_ip_any(flow)) {
6741 wc->masks.nw_ttl = 0xff;
6742 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6743 }
6744 break;
6745
6746 case OFPACT_SET_L4_SRC_PORT:
6747 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6748 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6749 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
6750 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6751 }
6752 break;
6753
6754 case OFPACT_SET_L4_DST_PORT:
6755 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6756 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6757 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
6758 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6759 }
6760 break;
6761
6762 case OFPACT_RESUBMIT:
6763 /* Freezing complicates resubmit. Some action in the flow
6764 * entry found by resubmit might trigger freezing. If that
6765 * happens, then we do not want to execute the resubmit again
6766 * during thawing, so we skip back to the head of the loop
6767 * to avoid that, only adding any actions that follow the resubmit
6768 * to the frozen actions.
6769 */
6770 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6771 continue;
6772
6773 case OFPACT_SET_TUNNEL:
6774 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
6775 break;
6776
6777 case OFPACT_SET_QUEUE:
6778 memset(&wc->masks.skb_priority, 0xff,
6779 sizeof wc->masks.skb_priority);
6780 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6781 break;
6782
6783 case OFPACT_POP_QUEUE:
6784 memset(&wc->masks.skb_priority, 0xff,
6785 sizeof wc->masks.skb_priority);
6786 if (flow->skb_priority != ctx->orig_skb_priority) {
6787 flow->skb_priority = ctx->orig_skb_priority;
6788 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6789 flow->skb_priority);
6790 }
6791 break;
6792
6793 case OFPACT_REG_MOVE:
6794 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
6795 break;
6796
6797 case OFPACT_SET_FIELD:
6798 set_field = ofpact_get_SET_FIELD(a);
6799 mf = set_field->field;
6800
6801 /* Set the field only if the packet actually has it. */
6802 if (mf_are_prereqs_ok(mf, flow, wc)) {
6803 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6804 mf_set_flow_value_masked(mf, set_field->value,
6805 ofpact_set_field_mask(set_field),
6806 flow);
6807 } else {
6808 xlate_report(ctx, OFT_WARN,
6809 "unmet prerequisites for %s, set_field ignored",
6810 mf->name);
6811
6812 }
6813 break;
6814
6815 case OFPACT_STACK_PUSH:
6816 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6817 &ctx->stack);
6818 break;
6819
6820 case OFPACT_STACK_POP:
6821 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
6822 break;
6823
6824 case OFPACT_PUSH_MPLS:
6825 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
6826 break;
6827
6828 case OFPACT_POP_MPLS:
6829 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6830 break;
6831
6832 case OFPACT_SET_MPLS_LABEL:
6833 compose_set_mpls_label_action(
6834 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
6835 break;
6836
6837 case OFPACT_SET_MPLS_TC:
6838 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
6839 break;
6840
6841 case OFPACT_SET_MPLS_TTL:
6842 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
6843 break;
6844
6845 case OFPACT_DEC_MPLS_TTL:
6846 if (compose_dec_mpls_ttl_action(ctx)) {
6847 return;
6848 }
6849 break;
6850
6851 case OFPACT_DEC_NSH_TTL:
6852 if (compose_dec_nsh_ttl_action(ctx)) {
6853 return;
6854 }
6855 break;
6856
6857 case OFPACT_DEC_TTL:
6858 wc->masks.nw_ttl = 0xff;
6859 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
6860 return;
6861 }
6862 break;
6863
6864 case OFPACT_NOTE:
6865 /* Nothing to do. */
6866 break;
6867
6868 case OFPACT_MULTIPATH:
6869 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
6870 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
6871 break;
6872
6873 case OFPACT_BUNDLE:
6874 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last,
6875 group_bucket_action);
6876 break;
6877
6878 case OFPACT_OUTPUT_REG:
6879 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last,
6880 group_bucket_action);
6881 break;
6882
6883 case OFPACT_OUTPUT_TRUNC:
6884 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
6885 ofpact_get_OUTPUT_TRUNC(a)->max_len, last,
6886 group_bucket_action);
6887 break;
6888
6889 case OFPACT_LEARN:
6890 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6891 break;
6892
6893 case OFPACT_CONJUNCTION:
6894 /* A flow with a "conjunction" action represents part of a special
6895 * kind of "set membership match". Such a flow should not actually
6896 * get executed, but it could via, say, a "packet-out", even though
6897 * that wouldn't be useful. Log it to help debugging. */
6898 xlate_report_error(ctx, "executing no-op conjunction action");
6899 break;
6900
6901 case OFPACT_EXIT:
6902 ctx->exit = true;
6903 break;
6904
6905 case OFPACT_UNROLL_XLATE:
6906 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
6907 break;
6908
6909 case OFPACT_FIN_TIMEOUT:
6910 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6911 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6912 break;
6913
6914 case OFPACT_CLEAR_ACTIONS:
6915 xlate_report_action_set(ctx, "was");
6916 ofpbuf_clear(&ctx->action_set);
6917 ctx->xin->flow.actset_output = OFPP_UNSET;
6918 ctx->action_set_has_group = false;
6919 break;
6920
6921 case OFPACT_WRITE_ACTIONS:
6922 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
6923 xlate_report_action_set(ctx, "is");
6924 break;
6925
6926 case OFPACT_WRITE_METADATA:
6927 metadata = ofpact_get_WRITE_METADATA(a);
6928 flow->metadata &= ~metadata->mask;
6929 flow->metadata |= metadata->metadata & metadata->mask;
6930 break;
6931
6932 case OFPACT_METER:
6933 xlate_meter_action(ctx, ofpact_get_METER(a));
6934 break;
6935
6936 case OFPACT_GOTO_TABLE: {
6937 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
6938
6939 ovs_assert(ctx->table_id < ogt->table_id);
6940
6941 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
6942 ogt->table_id, true, true, false, last,
6943 do_xlate_actions);
6944 break;
6945 }
6946
6947 case OFPACT_SAMPLE:
6948 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6949 break;
6950
6951 case OFPACT_CLONE:
6952 compose_clone(ctx, ofpact_get_CLONE(a), last);
6953 break;
6954
6955 case OFPACT_ENCAP:
6956 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6957 break;
6958
6959 case OFPACT_DECAP: {
6960 bool recirc_needed =
6961 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6962 if (!ctx->error && recirc_needed) {
6963 /* Recirculate for parsing of inner packet. */
6964 ctx_trigger_freeze(ctx);
6965 /* Then continue with next action. */
6966 a = ofpact_next(a);
6967 }
6968 break;
6969 }
6970
6971 case OFPACT_CT:
6972 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
6973 break;
6974
6975 case OFPACT_CT_CLEAR:
6976 compose_ct_clear_action(ctx);
6977 break;
6978
6979 case OFPACT_NAT:
6980 /* This will be processed by compose_conntrack_action(). */
6981 ctx->ct_nat_action = ofpact_get_NAT(a);
6982 break;
6983
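/* Debugging aid: force freezing (and thus recirculation) at this point, then
 * step past this action so that it is not executed again after thawing. */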
6984 case OFPACT_DEBUG_RECIRC:
6985 ctx_trigger_freeze(ctx);
6986 a = ofpact_next(a);
6987 break;
6988
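/* Debugging aid: force this flow into the slow path. */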
6989 case OFPACT_DEBUG_SLOW:
6990 ctx->xout->slow |= SLOW_ACTION;
6991 break;
6992
6993 case OFPACT_CHECK_PKT_LARGER: {
6994 if (last) {
6995 /* If this is the last action, then there is no need to
6996 * translate it. */
6997 break;
6998 }
6999 const struct ofpact *remaining_acts = ofpact_next(a);
7000 size_t remaining_acts_len = ofpact_remaining_len(remaining_acts,
7001 ofpacts,
7002 ofpacts_len);
7003 xlate_check_pkt_larger(ctx, ofpact_get_CHECK_PKT_LARGER(a),
7004 remaining_acts, remaining_acts_len);
7005 break;
7006 }
7007 }
7008
7009 /* Check if we need to store this and the remaining actions for later
7010 * execution. */
7011 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
7012 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
7013 break;
7014 }
7015 }
7016 }
7017
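/* Initializes 'xin' for translating 'flow', as received on 'in_port' of
 * 'ofproto' at table version 'version', into datapath actions stored in
 * 'odp_actions' (if nonnull). The remaining members of 'xin' are given
 * default values (e.g. no xcache, no trace, no explicit action list) that the
 * caller may override before calling xlate_actions(). If 'flow' carries a
 * recirculation ID, the corresponding frozen state is looked up here. */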
7018 void
7019 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
7020 ovs_version_t version, const struct flow *flow,
7021 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
7022 const struct dp_packet *packet, struct flow_wildcards *wc,
7023 struct ofpbuf *odp_actions)
7024 {
7025 xin->ofproto = ofproto;
7026 xin->tables_version = version;
7027 xin->flow = *flow;
7028 xin->upcall_flow = flow;
7029 xin->flow.in_port.ofp_port = in_port;
7030 xin->flow.actset_output = OFPP_UNSET;
7031 xin->packet = packet;
7032 xin->allow_side_effects = packet != NULL;
7033 xin->rule = rule;
7034 xin->xcache = NULL;
7035 xin->ofpacts = NULL;
7036 xin->ofpacts_len = 0;
7037 xin->tcp_flags = tcp_flags;
7038 xin->trace = NULL;
7039 xin->resubmit_stats = NULL;
7040 xin->depth = 0;
7041 xin->resubmits = 0;
7042 xin->wc = wc;
7043 xin->odp_actions = odp_actions;
7044 xin->in_packet_out = false;
7045 xin->recirc_queue = NULL;
7046 xin->xport_uuid = UUID_ZERO;
7047
7048 /* Do recirc lookup. */
7049 xin->frozen_state = NULL;
7050 if (flow->recirc_id) {
7051 const struct recirc_id_node *node
7052 = recirc_id_node_find(flow->recirc_id);
7053 if (node) {
7054 xin->frozen_state = &node->state;
7055 }
7056 }
7057 }
7058
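/* Releases the recirculation ID references held by 'xout', which the caller
 * must eventually do for every 'xout' passed to xlate_actions(). A no-op if
 * 'xout' is NULL. */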
7059 void
7060 xlate_out_uninit(struct xlate_out *xout)
7061 {
7062 if (xout) {
7063 recirc_refs_unref(&xout->recircs);
7064 }
7065 }
7066 \f
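/* Returns the skb_priority-to-DSCP mapping configured on 'xport' for
 * 'skb_priority', or NULL if there is no such mapping. */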
7067 static struct skb_priority_to_dscp *
7068 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
7069 {
7070 struct skb_priority_to_dscp *pdscp;
7071 uint32_t hash;
7072
7073 hash = hash_int(skb_priority, 0);
7074 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
7075 if (pdscp->skb_priority == skb_priority) {
7076 return pdscp;
7077 }
7078 }
7079 return NULL;
7080 }
7081
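/* If 'xport' has a DSCP value configured for 'skb_priority', stores it in
 * '*dscp' and returns true. Otherwise, stores 0 in '*dscp' and returns
 * false. */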
7082 static bool
7083 dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
7084 uint8_t *dscp)
7085 {
7086 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
7087 *dscp = pdscp ? pdscp->dscp : 0;
7088 return pdscp != NULL;
7089 }
7090
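/* Returns the number of skb_priority-to-DSCP mappings configured on
 * 'xport'. */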
7091 static size_t
7092 count_skb_priorities(const struct xport *xport)
7093 {
7094 return hmap_count(&xport->skb_priorities);
7095 }
7096
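/* Removes and frees all of the skb_priority-to-DSCP mappings configured on
 * 'xport'. */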
7097 static void
7098 clear_skb_priorities(struct xport *xport)
7099 {
7100 struct skb_priority_to_dscp *pdscp;
7101
7102 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
7103 free(pdscp);
7104 }
7105 }
7106
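/* Returns true if the datapath actions composed so far in 'ctx' include an
 * output to the bridge's local port (OFPP_LOCAL), false otherwise. */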
7107 static bool
7108 actions_output_to_local_port(const struct xlate_ctx *ctx)
7109 {
7110 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
7111 const struct nlattr *a;
7112 unsigned int left;
7113
7114 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
7115 ctx->odp_actions->size) {
7116 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
7117 && nl_attr_get_odp_port(a) == local_odp_port) {
7118 return true;
7119 }
7120 }
7121 return false;
7122 }
7123
7124 #if defined(__linux__)
7125 /* Returns the maximum number of packets that the Linux kernel is willing to
7126 * queue up internally to certain kinds of software-implemented ports, or the
7127 * default (and rarely modified) value if it cannot be determined. */
7128 static int
7129 netdev_max_backlog(void)
7130 {
7131 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
7132 static int max_backlog = 1000; /* The normal default value. */
7133
7134 if (ovsthread_once_start(&once)) {
7135 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
7136 FILE *stream;
7137 int n;
7138
7139 stream = fopen(filename, "r");
7140 if (!stream) {
7141 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7142 } else {
7143 if (fscanf(stream, "%d", &n) != 1) {
7144 VLOG_WARN("%s: read error", filename);
7145 } else if (n <= 100) {
7146 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
7147 } else {
7148 max_backlog = n;
7149 }
7150 fclose(stream);
7151 }
7152 ovsthread_once_done(&once);
7153
7154 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
7155 }
7156
7157 return max_backlog;
7158 }
7159
7160 /* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
7161 * 'odp_actions'. */
7162 static int
7163 count_output_actions(const struct ofpbuf *odp_actions)
7164 {
7165 const struct nlattr *a;
7166 size_t left;
7167 int n = 0;
7168
7169 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7170 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
7171 n++;
7172 }
7173 }
7174 return n;
7175 }
7176 #endif /* defined(__linux__) */
7177
7178 /* Returns true if 'odp_actions' contains more output actions than the datapath
7179 * can reliably handle in one go. On Linux, this is the value of the
7180 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
7181 * packets that the kernel is willing to queue up for processing while the
7182 * datapath is processing a set of actions. */
7183 static bool
7184 too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7185 {
7186 #ifdef __linux__
7187 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7188 && count_output_actions(odp_actions) > netdev_max_backlog());
7189 #else
7190 /* OSes other than Linux might have similar limits, but we don't know how
7191 * to determine them. */
7192 return false;
7193 #endif
7194 }
7195
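/* Initializes the flow wildcards for a translation: starts from a catch-all
 * match, then unwildcards the fields that every translation examines, as well
 * as fields needed for recirculation, NetFlow, and tunneling, where
 * applicable. */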
7196 static void
7197 xlate_wc_init(struct xlate_ctx *ctx)
7198 {
7199 flow_wildcards_init_catchall(ctx->wc);
7200
7201 /* Some fields we consider to always be examined. */
7202 WC_MASK_FIELD(ctx->wc, packet_type);
7203 WC_MASK_FIELD(ctx->wc, in_port);
7204 WC_MASK_FIELD(ctx->wc, dl_type);
7205 if (is_ip_any(&ctx->xin->flow)) {
7206 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
7207 }
7208
7209 if (ctx->xbridge->support.odp.recirc) {
7210 /* Always exactly match recirc_id when datapath supports
7211 * recirculation. */
7212 WC_MASK_FIELD(ctx->wc, recirc_id);
7213 }
7214
7215 if (ctx->xbridge->netflow) {
7216 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
7217 }
7218
7219 tnl_wc_init(&ctx->xin->flow, ctx->wc);
7220 }
7221
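/* Finishes up the flow wildcards at the end of a translation, clearing masks
 * that must not appear in a datapath flow (non-packet metadata, Ethernet
 * addresses for non-Ethernet packets) and fixing up fields such as the ICMP
 * type/code masks and the VLAN CFI bit, as described in the comments
 * below. */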
7222 static void
7223 xlate_wc_finish(struct xlate_ctx *ctx)
7224 {
7225 int i;
7226
7227 /* Clear the metadata and register wildcard masks, because we won't
7228 * use non-header fields as part of the cache. */
7229 flow_wildcards_clear_non_packet_fields(ctx->wc);
7230
7231 /* Wildcard Ethernet address fields if the original packet type was not
7232 * Ethernet.
7233 *
7234 * (The Ethertype field is used even when the original packet type is not
7235 * Ethernet.) */
7236 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
7237 ctx->wc->masks.dl_dst = eth_addr_zero;
7238 ctx->wc->masks.dl_src = eth_addr_zero;
7239 }
7240
7241 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
7242 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
7243 * represent these fields. The datapath interface, on the other hand,
7244 * represents them with just 8 bits each. This means that if the high
7245 * 8 bits of the masks for these fields somehow become set, then they
7246 * will get chopped off by a round trip through the datapath, and
7247 * revalidation will spot that as an inconsistency and delete the flow.
7248 * Avoid the problem here by making sure that only the low 8 bits of
7249 * either field can be unwildcarded for ICMP.
7250 */
7251 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
7252 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
7253 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
7254 }
7255 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
7256 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
7257 if (ctx->wc->masks.vlans[i].tci) {
7258 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
7259 }
7260 }
7261
7262 /* The classifier might return masks that match on tp_src and tp_dst even
7263 * for later fragments. This happens because there might be flows that
7264 * match on tp_src or tp_dst without matching on the frag bits, because
7265 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
7266 * datapath flows and since tp_src and tp_dst are always going to be 0,
7267 * wildcard the fields here. */
7268 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
7269 ctx->wc->masks.tp_src = 0;
7270 ctx->wc->masks.tp_dst = 0;
7271 }
7272 }
7273
7274 /* Translates the flow, actions, or rule in 'xin' into datapath actions in
7275 * 'xout'.
7276 * The caller must take responsibility for eventually freeing 'xout', with
7277 * xlate_out_uninit().
7278 * Returns 'XLATE_OK' if translation was successful. In case of an error an
7279 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
7280 * so that most callers may ignore the return value and transparently install a
7281 * drop flow when the translation fails. */
7282 enum xlate_error
7283 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
7284 {
7285 *xout = (struct xlate_out) {
7286 .slow = 0,
7287 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
7288 };
7289
7290 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7291 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
7292 if (!xbridge) {
7293 return XLATE_BRIDGE_NOT_FOUND;
7294 }
7295
7296 struct flow *flow = &xin->flow;
7297
7298 uint8_t stack_stub[1024];
7299 uint64_t action_set_stub[1024 / 8];
7300 uint64_t frozen_actions_stub[1024 / 8];
7301 uint64_t actions_stub[256 / 8];
7302 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
7303 struct xlate_ctx ctx = {
7304 .xin = xin,
7305 .xout = xout,
7306 .base_flow = *flow,
7307 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
7308 .xcfg = xcfg,
7309 .xbridge = xbridge,
7310 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
7311 .rule = xin->rule,
7312 .wc = (xin->wc
7313 ? xin->wc
7314 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
7315 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
7316
7317 .depth = xin->depth,
7318 .resubmits = xin->resubmits,
7319 .in_action_set = false,
7320 .in_packet_out = xin->in_packet_out,
7321 .pending_encap = false,
7322 .pending_decap = false,
7323 .encap_data = NULL,
7324
7325 .table_id = 0,
7326 .rule_cookie = OVS_BE64_MAX,
7327 .orig_skb_priority = flow->skb_priority,
7328 .sflow_n_outputs = 0,
7329 .sflow_odp_port = 0,
7330 .nf_output_iface = NF_OUT_DROP,
7331 .exit = false,
7332 .error = XLATE_OK,
7333 .mirrors = 0,
7334
7335 .freezing = false,
7336 .recirc_update_dp_hash = false,
7337 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
7338 .pause = NULL,
7339
7340 .was_mpls = false,
7341 .conntracked = false,
7342
7343 .ct_nat_action = NULL,
7344
7345 .action_set_has_group = false,
7346 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
7347 };
7348
7349 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
7350 * the packet as the datapath will treat it for output actions. Our
7351 * datapath doesn't retain tunneling information without us re-setting
7352 * it, so clear the tunnel data.
7353 */
7354
7355 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
7356
7357 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
7358 xlate_wc_init(&ctx);
7359
7360 COVERAGE_INC(xlate_actions);
7361
7362 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
7363 xbridge->name);
7364 if (xin->frozen_state) {
7365 const struct frozen_state *state = xin->frozen_state;
7366
7367 struct ovs_list *old_trace = xin->trace;
7368 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
7369
7370 if (xin->ofpacts_len > 0 || ctx.rule) {
7371 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
7372 xin->ofpacts_len ? "actions" : "rule");
7373 ctx.error = XLATE_RECIRCULATION_CONFLICT;
7374 goto exit;
7375 }
7376
7377 /* Set the bridge for post-recirculation processing if needed. */
7378 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
7379 const struct xbridge *new_bridge
7380 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
7381
7382 if (OVS_UNLIKELY(!new_bridge)) {
7383 /* Drop the packet if the bridge cannot be found. */
7384 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
7385 ctx.error = XLATE_BRIDGE_NOT_FOUND;
7386 xin->trace = old_trace;
7387 goto exit;
7388 }
7389 ctx.xbridge = new_bridge;
7390 /* The bridge is now known so obtain its table version. */
7391 ctx.xin->tables_version
7392 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
7393 }
7394
7395 /* Set the thawed table id. Note: A table lookup is done only if there
7396 * are no frozen actions. */
7397 ctx.table_id = state->table_id;
7398 xlate_report(&ctx, OFT_THAW,
7399 "Resuming from table %"PRIu8, ctx.table_id);
7400
7401 ctx.conntracked = state->conntracked;
7402 if (!state->conntracked) {
7403 clear_conntrack(&ctx);
7404 }
7405
7406 /* Restore pipeline metadata. May change flow's in_port and other
7407 * metadata to the values that existed when freezing was triggered. */
7408 frozen_metadata_to_flow(&state->metadata, flow);
7409
7410 /* Restore stack, if any. */
7411 if (state->stack) {
7412 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
7413 }
7414
7415 /* Restore mirror state. */
7416 ctx.mirrors = state->mirrors;
7417
7418 /* Restore action set, if any. */
7419 if (state->action_set_len) {
7420 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
7421 state->action_set, state->action_set_len);
7422
7423 flow->actset_output = OFPP_UNSET;
7424 xlate_write_actions__(&ctx, state->action_set,
7425 state->action_set_len);
7426 }
7427
7428 /* Restore frozen actions. If there are no actions, processing will
7429 * start with a lookup in the table set above. */
7430 xin->ofpacts = state->ofpacts;
7431 xin->ofpacts_len = state->ofpacts_len;
7432 if (state->ofpacts_len) {
7433 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
7434 xin->ofpacts, xin->ofpacts_len);
7435 }
7436
7437 xin->trace = old_trace;
7438 } else if (OVS_UNLIKELY(flow->recirc_id)) {
7439 xlate_report_error(&ctx,
7440 "Recirculation context not found for ID %"PRIx32,
7441 flow->recirc_id);
7442 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
7443 goto exit;
7444 }
7445
7446 /* Tunnel metadata in udpif format must be normalized before translation. */
7447 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7448 const struct tun_table *tun_tab = ofproto_get_tun_tab(
7449 &ctx.xbridge->ofproto->up);
7450 int err;
7451
7452 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
7453 &xin->upcall_flow->tunnel,
7454 &flow->tunnel);
7455 if (err) {
7456 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
7457 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7458 goto exit;
7459 }
7460 } else if (!flow->tunnel.metadata.tab || xin->frozen_state) {
7461 /* If the original flow did not come in on a tunnel, then it won't have
7462 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7463 * table in case we generate tunnel actions. */
7464 /* If the translation is from a frozen state, we use the latest
7465 * TLV map to avoid a segmentation fault in case the old TLV map has
7466 * been replaced by a new one.
7467 * XXX: It would be better to abort translation if the table has changed. */
7468 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7469 &ctx.xbridge->ofproto->up);
7470 }
7471 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7472
7473 /* Get the proximate input port of the packet. (If xin->frozen_state,
7474 * flow->in_port is the ultimate input port of the packet.) */
7475 struct xport *in_port = get_ofp_port(xbridge,
7476 ctx.base_flow.in_port.ofp_port);
7477 if (in_port && !in_port->peer) {
7478 ctx.xin->xport_uuid = in_port->uuid;
7479 }
7480
7481 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7482 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
7483 /* Add a dummy Ethernet header to a non-L2 packet if it's coming from
7484 * an L3 port, so that all packets are L2 packets for the lookup.
7485 * The dl_type has already been set from the packet_type. */
7486 flow->packet_type = htonl(PT_ETH);
7487 flow->dl_src = eth_addr_zero;
7488 flow->dl_dst = eth_addr_zero;
7489 ctx.pending_encap = true;
7490 }
7491
7492 if (!xin->ofpacts && !ctx.rule) {
7493 ctx.rule = rule_dpif_lookup_from_table(
7494 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
7495 ctx.xin->resubmit_stats, &ctx.table_id,
7496 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
7497 if (ctx.xin->resubmit_stats) {
7498 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
7499 }
7500 if (ctx.xin->xcache) {
7501 struct xc_entry *entry;
7502
7503 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
7504 entry->rule = ctx.rule;
7505 ofproto_rule_ref(&ctx.rule->up);
7506 }
7507
7508 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
7509 }
7510
7511 /* Update tunnel stats only for packets that are not being thawed. */
7512 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
7513 if (ctx.xin->resubmit_stats) {
7514 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7515 if (in_port->bfd) {
7516 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7517 }
7518 }
7519 if (ctx.xin->xcache) {
7520 struct xc_entry *entry;
7521
7522 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
7523 entry->dev.rx = netdev_ref(in_port->netdev);
7524 entry->dev.bfd = bfd_ref(in_port->bfd);
7525 }
7526 }
7527
7528 if (!xin->frozen_state && process_special(&ctx, in_port)) {
7529 /* process_special() did all the processing for this packet.
7530 *
7531 * We do not perform special processing on thawed packets, since that
7532 * was done before they were frozen and should not be redone. */
7533 mirror_ingress_packet(&ctx);
7534 } else if (in_port && in_port->xbundle
7535 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
7536 xlate_report_error(&ctx, "dropping packet received on port "
7537 "%s, which is reserved exclusively for mirroring",
7538 in_port->xbundle->name);
7539 } else {
7540 /* Sampling is done on initial reception; don't redo after thawing. */
7541 unsigned int user_cookie_offset = 0;
7542 if (!xin->frozen_state) {
7543 user_cookie_offset = compose_sflow_action(&ctx);
7544 compose_ipfix_action(&ctx, ODPP_NONE);
7545 }
7546 size_t sample_actions_len = ctx.odp_actions->size;
7547
7548 if (tnl_process_ecn(flow)
7549 && (!in_port || may_receive(in_port, &ctx))) {
7550 const struct ofpact *ofpacts;
7551 size_t ofpacts_len;
7552
7553 if (xin->ofpacts) {
7554 ofpacts = xin->ofpacts;
7555 ofpacts_len = xin->ofpacts_len;
7556 } else if (ctx.rule) {
7557 const struct rule_actions *actions
7558 = rule_get_actions(&ctx.rule->up);
7559 ofpacts = actions->ofpacts;
7560 ofpacts_len = actions->ofpacts_len;
7561 ctx.rule_cookie = ctx.rule->up.flow_cookie;
7562 } else {
7563 OVS_NOT_REACHED();
7564 }
7565
7566 mirror_ingress_packet(&ctx);
7567 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true, false);
7568 if (ctx.error) {
7569 goto exit;
7570 }
7571
7572 /* We've let OFPP_NORMAL and the learning action look at the
7573 * packet, so cancel all actions and freezing if forwarding is
7574 * disabled. */
7575 if (in_port && (!xport_stp_forward_state(in_port) ||
7576 !xport_rstp_forward_state(in_port))) {
7577 ctx.odp_actions->size = sample_actions_len;
7578 ctx_cancel_freeze(&ctx);
7579 ofpbuf_clear(&ctx.action_set);
7580 }
7581
7582 if (!ctx.freezing) {
7583 xlate_action_set(&ctx);
7584 }
7585 if (ctx.freezing) {
7586 finish_freezing(&ctx);
7587 }
7588 }
7589
7590 /* Output only fully processed packets. */
7591 if (!ctx.freezing
7592 && xbridge->has_in_band
7593 && in_band_must_output_to_local_port(flow)
7594 && !actions_output_to_local_port(&ctx)) {
7595 WC_MASK_FIELD(ctx.wc, nw_proto);
7596 WC_MASK_FIELD(ctx.wc, tp_src);
7597 WC_MASK_FIELD(ctx.wc, tp_dst);
7598 WC_MASK_FIELD(ctx.wc, dl_type);
7599 xlate_report(&ctx, OFT_DETAIL, "outputting DHCP packet "
7600 "to local port for in-band control");
7601 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
7602 }
7603
7604 if (user_cookie_offset) {
7605 fix_sflow_action(&ctx, user_cookie_offset);
7606 }
7607 }
7608
7609 if (nl_attr_oversized(ctx.odp_actions->size)) {
7610 /* These datapath actions are too big for a Netlink attribute, so we
7611 * can't hand them to the kernel directly. dpif_execute() can execute
7612 * them one by one with help, so just mark the result as SLOW_ACTION to
7613 * prevent the flow from being installed. */
7614 COVERAGE_INC(xlate_actions_oversize);
7615 ctx.xout->slow |= SLOW_ACTION;
7616 } else if (too_many_output_actions(ctx.odp_actions)) {
7617 COVERAGE_INC(xlate_actions_too_many_output);
7618 ctx.xout->slow |= SLOW_ACTION;
7619 }
7620
7621 /* Update NetFlow for non-frozen traffic. */
7622 if (xbridge->netflow && !xin->frozen_state) {
7623 if (ctx.xin->resubmit_stats) {
7624 netflow_flow_update(xbridge->netflow, flow,
7625 ctx.nf_output_iface,
7626 ctx.xin->resubmit_stats);
7627 }
7628 if (ctx.xin->xcache) {
7629 struct xc_entry *entry;
7630
7631 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
7632 entry->nf.netflow = netflow_ref(xbridge->netflow);
7633 entry->nf.flow = xmemdup(flow, sizeof *flow);
7634 entry->nf.iface = ctx.nf_output_iface;
7635 }
7636 }
7637
7638 /* Translate tunnel metadata masks to udpif format if necessary. */
7639 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7640 if (ctx.wc->masks.tunnel.metadata.present.map) {
7641 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7642 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7643 sizeof(struct geneve_opt)];
7644
7645 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7646 &ctx.wc->masks.tunnel,
7647 upcall_tnl->metadata.opts.gnv,
7648 upcall_tnl->metadata.present.len,
7649 opts);
7650 memset(&ctx.wc->masks.tunnel.metadata, 0,
7651 sizeof ctx.wc->masks.tunnel.metadata);
7652 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7653 upcall_tnl->metadata.present.len);
7654 }
7655 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7656 ctx.wc->masks.tunnel.metadata.tab = NULL;
7657 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7658 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7659 /* If we didn't have options in UDPIF format and didn't have an existing
7660 * metadata table, then it means that there were no options at all when
7661 * we started processing and any wildcards we picked up were from
7662 * action generation. Without options on the incoming packet, wildcards
7663 * aren't meaningful. To avoid them possibly getting misinterpreted,
7664 * just clear everything. */
7665 if (ctx.wc->masks.tunnel.metadata.present.map) {
7666 memset(&ctx.wc->masks.tunnel.metadata, 0,
7667 sizeof ctx.wc->masks.tunnel.metadata);
7668 } else {
7669 ctx.wc->masks.tunnel.metadata.tab = NULL;
7670 }
7671 }
7672
7673 xlate_wc_finish(&ctx);
7674
7675 exit:
7676 /* Reset the table to what it was when we came in. If we only fetched
7677 * it locally, then it has no meaning outside of flow translation. */
7678 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7679
7680 ofpbuf_uninit(&ctx.stack);
7681 ofpbuf_uninit(&ctx.action_set);
7682 ofpbuf_uninit(&ctx.frozen_actions);
7683 ofpbuf_uninit(&scratch_actions);
7684 ofpbuf_delete(ctx.encap_data);
7685
7686 /* Make sure we return a "drop flow" in case of an error. */
7687 if (ctx.error) {
7688 xout->slow = 0;
7689 if (xin->odp_actions) {
7690 ofpbuf_clear(xin->odp_actions);
7691 }
7692 }
7693 return ctx.error;
7694 }
7695
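/* Resumes translation of a packet whose processing was paused partway through
 * the pipeline (e.g. by NXAST_PAUSE), reconstructing the frozen pipeline
 * state from the continuation carried in 'pin' and translating the remaining
 * actions into 'odp_actions'. The extracted flow is stored in '*flow' and
 * any slow-path reason in '*slow'. Returns OFPERR_NXR_STALE if the bridge
 * recorded in the continuation no longer exists; other translation errors are
 * not reported to the caller (see the comment above the return statement). */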
7696 enum ofperr
7697 xlate_resume(struct ofproto_dpif *ofproto,
7698 const struct ofputil_packet_in_private *pin,
7699 struct ofpbuf *odp_actions,
7700 enum slow_path_reason *slow,
7701 struct flow *flow,
7702 struct xlate_cache *xcache)
7703 {
7704 struct dp_packet packet;
7705 dp_packet_use_const(&packet, pin->base.packet,
7706 pin->base.packet_len);
7707
7708 pkt_metadata_from_flow(&packet.md, &pin->base.flow_metadata.flow);
7709 flow_extract(&packet, flow);
7710
7711 struct xlate_in xin;
7712 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7713 flow, 0, NULL, ntohs(flow->tcp_flags),
7714 &packet, NULL, odp_actions);
7715 xin.xcache = xcache;
7716
7717 struct ofpact_note noop;
7718 ofpact_init_NOTE(&noop);
7719 noop.length = 0;
7720
7721 bool any_actions = pin->actions_len > 0;
7722 struct frozen_state state = {
7723 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7724 .ofproto_uuid = pin->bridge,
7725 .stack = pin->stack,
7726 .stack_size = pin->stack_size,
7727 .mirrors = pin->mirrors,
7728 .conntracked = pin->conntracked,
7729 .xport_uuid = UUID_ZERO,
7730
7731 /* When there are no actions, xlate_actions() will search the flow
7732 * table. We don't want it to do that (we want it to resume), so
7733 * supply a no-op action if there aren't any.
7734 *
7735 * (We can't necessarily avoid translating actions entirely if there
7736 * aren't any actions, because there might be some finishing-up to do
7737 * at the end of the pipeline, and we don't check for those
7738 * conditions.) */
7739 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7740 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7741
7742 .action_set = pin->action_set,
7743 .action_set_len = pin->action_set_len,
7744 };
7745 frozen_metadata_from_flow(&state.metadata,
7746 &pin->base.flow_metadata.flow);
7747 xin.frozen_state = &state;
7748
7749 struct xlate_out xout;
7750 enum xlate_error error = xlate_actions(&xin, &xout);
7751 *slow = xout.slow;
7752 xlate_out_uninit(&xout);
7753
7754 /* xlate_actions() can generate a number of errors, but only
7755 * XLATE_BRIDGE_NOT_FOUND clearly needs to be reported over OpenFlow.
7756 * The others could come up in packet-outs or regular flow translation
7757 * as well, and reporting them to the controller is unlikely to be
7758 * useful. */
7759 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7760 }
7761
7762 /* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
7763 * supports a notion of an OAM flag, sets it if 'oam' is true.
7764 * May modify 'packet'.
7765 * Returns 0 if successful, otherwise a positive errno value. */
7766 int
7767 xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7768 struct dp_packet *packet)
7769 {
7770 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7771 struct xport *xport;
7772 uint64_t ofpacts_stub[1024 / 8];
7773 struct ofpbuf ofpacts;
7774 struct flow flow;
7775
7776 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
7777 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
7778 flow_extract(packet, &flow);
7779 flow.in_port.ofp_port = OFPP_NONE;
7780
7781 xport = xport_lookup(xcfg, ofport);
7782 if (!xport) {
7783 return EINVAL;
7784 }
7785
7786 if (oam) {
7787 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7788 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7789 &flag, &flag);
7790 }
7791
7792 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
7793
7794 /* Actions here are not referring to anything versionable (flow tables or
7795 * groups) so we don't need to worry about the version here. */
7796 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7797 OVS_VERSION_MAX, &flow, NULL,
7798 ofpacts.data, ofpacts.size, packet);
7799 }
7800
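/* Updates the MAC learning table of 'ofproto' so that 'dl_src' is learned on
 * the bundle containing 'in_port', in VLAN 'vlan'. 'is_grat_arp' indicates
 * whether the update was triggered by a gratuitous ARP. A no-op if the
 * bridge or the input bundle cannot be found. */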
7801 void
7802 xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7803 ofp_port_t in_port, struct eth_addr dl_src,
7804 int vlan, bool is_grat_arp)
7805 {
7806 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7807 struct xbridge *xbridge;
7808 struct xbundle *xbundle;
7809
7810 xbridge = xbridge_lookup(xcfg, ofproto);
7811 if (!xbridge) {
7812 return;
7813 }
7814
7815 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
7816 if (!xbundle) {
7817 return;
7818 }
7819
7820 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
7821 }
7822
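/* Records 'support', the set of features supported by the datapath backer, in
 * the xbridge corresponding to 'ofproto', if one exists. */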
7823 void
7824 xlate_set_support(const struct ofproto_dpif *ofproto,
7825 const struct dpif_backer_support *support)
7826 {
7827 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7828 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7829
7830 if (xbridge) {
7831 xbridge->support = *support;
7832 }
7833 }