ofproto/ofproto-dpif-xlate.c
1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
14
15 #include <config.h>
16
17 #include "ofproto/ofproto-dpif-xlate.h"
18
19 #include <errno.h>
20 #include <sys/types.h>
21 #include <netinet/in.h>
22 #include <arpa/inet.h>
23 #include <net/if.h>
24 #include <sys/socket.h>
25
26 #include "bfd.h"
27 #include "bitmap.h"
28 #include "bond.h"
29 #include "bundle.h"
30 #include "byte-order.h"
31 #include "cfm.h"
32 #include "connmgr.h"
33 #include "coverage.h"
34 #include "csum.h"
35 #include "dp-packet.h"
36 #include "dpif.h"
37 #include "in-band.h"
38 #include "lacp.h"
39 #include "learn.h"
40 #include "mac-learning.h"
41 #include "mcast-snooping.h"
42 #include "multipath.h"
43 #include "netdev-vport.h"
44 #include "netlink.h"
45 #include "nx-match.h"
46 #include "odp-execute.h"
47 #include "ofproto/ofproto-dpif-ipfix.h"
48 #include "ofproto/ofproto-dpif-mirror.h"
49 #include "ofproto/ofproto-dpif-monitor.h"
50 #include "ofproto/ofproto-dpif-sflow.h"
51 #include "ofproto/ofproto-dpif-trace.h"
52 #include "ofproto/ofproto-dpif-xlate-cache.h"
53 #include "ofproto/ofproto-dpif.h"
54 #include "ofproto/ofproto-provider.h"
55 #include "openvswitch/dynamic-string.h"
56 #include "openvswitch/meta-flow.h"
57 #include "openvswitch/list.h"
58 #include "openvswitch/ofp-actions.h"
59 #include "openvswitch/ofp-ed-props.h"
60 #include "openvswitch/vlog.h"
61 #include "ovs-lldp.h"
62 #include "ovs-router.h"
63 #include "packets.h"
64 #include "tnl-neigh-cache.h"
65 #include "tnl-ports.h"
66 #include "tunnel.h"
67 #include "util.h"
68
69 COVERAGE_DEFINE(xlate_actions);
70 COVERAGE_DEFINE(xlate_actions_oversize);
71 COVERAGE_DEFINE(xlate_actions_too_many_output);
72
73 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
74
75 /* Maximum depth of flow table recursion (due to resubmit actions) in a
76 * flow translation.
77 *
78 * The goal of limiting the depth of resubmits is to ensure that flow
79 * translation eventually terminates. Only resubmits to the same table or an
80 * earlier table count against the maximum depth. This is because resubmits to
81 * strictly monotonically increasing table IDs will eventually terminate, since
82 * any OpenFlow switch has a finite number of tables. OpenFlow tables are most
83 * commonly traversed in numerically increasing order, so this limit has little
84 * effect on conventionally designed OpenFlow pipelines.
85 *
86 * Outputs to patch ports and to groups also count against the depth limit. */
87 #define MAX_DEPTH 64
88
89 /* Maximum number of resubmit actions in a flow translation, whether they are
90 * recursive or not. */
91 #define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
92
93 struct xbridge {
94 struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
95 struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
96
97 struct ovs_list xbundles; /* Owned xbundles. */
98 struct hmap xports; /* Indexed by ofp_port. */
99
100 char *name; /* Name used in log messages. */
101 struct dpif *dpif; /* Datapath interface. */
102 struct mac_learning *ml; /* Mac learning handle. */
103 struct mcast_snooping *ms; /* Multicast Snooping handle. */
104 struct mbridge *mbridge; /* Mirroring. */
105 struct dpif_sflow *sflow; /* SFlow handle, or null. */
106 struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
107 struct netflow *netflow; /* Netflow handle, or null. */
108 struct stp *stp; /* STP or null if disabled. */
109 struct rstp *rstp; /* RSTP or null if disabled. */
110
111 bool has_in_band; /* Bridge has in band control? */
112 bool forward_bpdu; /* Bridge forwards STP BPDUs? */
113
114 /* Datapath feature support. */
115 struct dpif_backer_support support;
116 };
117
118 struct xbundle {
119 struct hmap_node hmap_node; /* In global 'xbundles' map. */
120 struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
121
122 struct ovs_list list_node; /* In parent 'xbridges' list. */
123 struct xbridge *xbridge; /* Parent xbridge. */
124
125 struct ovs_list xports; /* Contains "struct xport"s. */
126
127 char *name; /* Name used in log messages. */
128 struct bond *bond; /* Nonnull iff more than one port. */
129 struct lacp *lacp; /* LACP handle or null. */
130
131 enum port_vlan_mode vlan_mode; /* VLAN mode. */
132 uint16_t qinq_ethtype; /* Ethertype of dot1q-tunnel interface
133 * either 0x8100 or 0x88a8. */
134 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
135 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
136 * NULL if all VLANs are trunked. */
137 unsigned long *cvlans; /* Bitmap of allowed customer vlans,
138 * NULL if all VLANs are allowed */
139 bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
140 bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
141 bool protected; /* Protected port mode */
142 };
143
144 struct xport {
145 struct hmap_node hmap_node; /* Node in global 'xports' map. */
146     struct ofport_dpif *ofport;      /* Key in global 'xports' map. */
147
148 struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
149 ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
150
151 odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
152
153 struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
154 struct xbundle *xbundle; /* Parent xbundle or null. */
155
156 struct netdev *netdev; /* 'ofport''s netdev. */
157
158 struct xbridge *xbridge; /* Parent bridge. */
159 struct xport *peer; /* Patch port peer or null. */
160
161 enum ofputil_port_config config; /* OpenFlow port configuration. */
162 enum ofputil_port_state state; /* OpenFlow port state. */
163 int stp_port_no; /* STP port number or -1 if not in use. */
164 struct rstp_port *rstp_port; /* RSTP port or null. */
165
166 struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
167
168 bool may_enable; /* May be enabled in bonds. */
169 bool is_tunnel; /* Is a tunnel port. */
170 enum netdev_pt_mode pt_mode; /* packet_type handling. */
171
172 struct cfm *cfm; /* CFM handle or null. */
173 struct bfd *bfd; /* BFD handle or null. */
174 struct lldp *lldp; /* LLDP handle or null. */
175 };
176
177 struct xlate_ctx {
178 struct xlate_in *xin;
179 struct xlate_out *xout;
180
181 const struct xbridge *xbridge;
182
183 /* Flow at the last commit. */
184 struct flow base_flow;
185
186 /* Tunnel IP destination address as received. This is stored separately
187 * as the base_flow.tunnel is cleared on init to reflect the datapath
188 * behavior. Used to make sure not to send tunneled output to ourselves,
189 * which might lead to an infinite loop. This could happen easily
190      * if a tunnel is marked as 'remote_ip=flow', and the flow does not
191 * actually set the tun_dst field. */
192 struct in6_addr orig_tunnel_ipv6_dst;
193
194 /* Stack for the push and pop actions. See comment above nx_stack_push()
195 * in nx-match.c for info on how the stack is stored. */
196 struct ofpbuf stack;
197
198 /* The rule that we are currently translating, or NULL. */
199 struct rule_dpif *rule;
200
201 /* Flow translation populates this with wildcards relevant in translation.
202 * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
203 * null, this is a pointer to a temporary buffer. */
204 struct flow_wildcards *wc;
205
206 /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
207 * this is the same pointer. When 'xin->odp_actions' is null, this points
208 * to a scratch ofpbuf. This allows code to add actions to
209 * 'ctx->odp_actions' without worrying about whether the caller really
210 * wants actions. */
211 struct ofpbuf *odp_actions;
212
213 /* Statistics maintained by xlate_table_action().
214 *
215 * These statistics limit the amount of work that a single flow
216 * translation can perform. The goal of the first of these, 'depth', is
217 * primarily to prevent translation from performing an infinite amount of
218 * work. It counts the current depth of nested "resubmit"s (and a few
219 * other activities); when a resubmit returns, it decreases. Resubmits to
220 * tables in strictly monotonically increasing order don't contribute to
221 * 'depth' because they cannot cause a flow translation to take an infinite
222 * amount of time (because the number of tables is finite). Translation
223 * aborts when 'depth' exceeds MAX_DEPTH.
224 *
225 * 'resubmits', on the other hand, prevents flow translation from
226      * performing an extraordinarily large, though still finite, amount of work.
227 * It counts the total number of resubmits (and a few other activities)
228 * that have been executed. Returning from a resubmit does not affect this
229 * counter. Thus, this limits the amount of work that a particular
230 * translation can perform. Translation aborts when 'resubmits' exceeds
231 * MAX_RESUBMITS (which is much larger than MAX_DEPTH).
232 */
233 int depth; /* Current resubmit nesting depth. */
234 int resubmits; /* Total number of resubmits. */
235 bool in_group; /* Currently translating ofgroup, if true. */
236 bool in_action_set; /* Currently translating action_set, if true. */
237 bool in_packet_out; /* Currently translating a packet_out msg, if
238 * true. */
239 bool pending_encap; /* True when waiting to commit a pending
240 * encap action. */
241 struct ofpbuf *encap_data; /* May contain a pointer to an ofpbuf with
242 * context for the datapath encap action.*/
243
244 uint8_t table_id; /* OpenFlow table ID where flow was found. */
245 ovs_be64 rule_cookie; /* Cookie of the rule being translated. */
246 uint32_t orig_skb_priority; /* Priority when packet arrived. */
247 uint32_t sflow_n_outputs; /* Number of output ports. */
248 odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
249 ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
250 bool exit; /* No further actions should be processed. */
251 mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
252     int mirror_snaplen;         /* Max size of a mirror packet in bytes. */
253
254 /* Freezing Translation
255 * ====================
256 *
257 * At some point during translation, the code may recognize the need to halt
258 * and checkpoint the translation in a way that it can be restarted again
259 * later. We call the checkpointing process "freezing" and the restarting
260 * process "thawing".
261 *
262 * The use cases for freezing are:
263 *
264 * - "Recirculation", where the translation process discovers that it
265 * doesn't have enough information to complete translation without
266 * actually executing the actions that have already been translated,
267 * which provides the additionally needed information. In these
268 * situations, translation freezes translation and assigns the frozen
269 * data a unique "recirculation ID", which it associates with the data
270 * in a table in userspace (see ofproto-dpif-rid.h). It also adds a
271 * OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
272 * actions. When a packet hits that action, the datapath looks its
273 * flow up again using the ID. If there's a miss, it comes back to
274 * userspace, which find the recirculation table entry for the ID,
275 * thaws the associated frozen data, and continues translation from
276 * that point given the additional information that is now known.
277 *
278 * The archetypal example is MPLS. As MPLS is implemented in
279 * OpenFlow, the protocol that follows the last MPLS label becomes
280 * known only when that label is popped by an OpenFlow action. That
281 * means that Open vSwitch can't extract the headers beyond the MPLS
282 * labels until the pop action is executed. Thus, at that point
283 * translation uses the recirculation process to extract the headers
284 * beyond the MPLS labels.
285 *
286 * (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
287 * output to bonds. OVS pre-populates all the datapath flows for bond
288 * output in the datapath, though, which means that the elaborate
289 * process of coming back to userspace for a second round of
290 * translation isn't needed, and so bonds don't follow the above
291 * process.)
292 *
293 * - "Continuation". A continuation is a way for an OpenFlow controller
294 * to interpose on a packet's traversal of the OpenFlow tables. When
295 * the translation process encounters a "controller" action with the
296 * "pause" flag, it freezes translation, serializes the frozen data,
297 * and sends it to an OpenFlow controller. The controller then
298 * examines and possibly modifies the frozen data and eventually sends
299 * it back to the switch, which thaws it and continues translation.
300 *
301 * The main problem of freezing translation is preserving state, so that
302 * when the translation is thawed later it resumes from where it left off,
303 * without disruption. In particular, actions must be preserved as follows:
304 *
305 * - If we're freezing because an action needed more information, the
306 * action that prompted it.
307 *
308 * - Any actions remaining to be translated within the current flow.
309 *
310 * - If translation was frozen within a NXAST_RESUBMIT, then any actions
311 * following the resubmit action. Resubmit actions can be nested, so
312 * this has to go all the way up the control stack.
313 *
314 * - The OpenFlow 1.1+ action set.
315 *
316 * State that actions and flow table lookups can depend on, such as the
317 * following, must also be preserved:
318 *
319 * - Metadata fields (input port, registers, OF1.1+ metadata, ...).
320 *
321 * - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
322 *
323 * - The table ID and cookie of the flow being translated at each level
324 * of the control stack, because these can become visible through
325 * OFPAT_CONTROLLER actions (and other ways).
326 *
327 * Translation allows for the control of this state preservation via these
328 * members. When a need to freeze translation is identified, the
329 * translation process:
330 *
331 * 1. Sets 'freezing' to true.
332 *
333 * 2. Sets 'exit' to true to tell later steps that we're exiting from the
334 * translation process.
335 *
336 * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
337 * frozen_actions.header to the action to make it easy to find it later.
338 * This action holds the current table ID and cookie so that they can be
339 * restored during a post-recirculation upcall translation.
340 *
341 * 4. Adds the action that prompted recirculation and any actions following
342 * it within the same flow to 'frozen_actions', so that they can be
343 * executed during a post-recirculation upcall translation.
344 *
345 * 5. Returns.
346 *
347 * 6. The action that prompted recirculation might be nested in a stack of
348 * nested "resubmit"s that have actions remaining. Each of these notices
349 * that we're exiting and freezing and responds by adding more
350 * OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
351 * followed by any actions that were yet unprocessed.
352 *
353 * If we're freezing because of recirculation, the caller generates a
354 * recirculation ID and associates all the state produced by this process
355 * with it. For post-recirculation upcall translation, the caller passes it
356 * back in for the new translation to execute. The process yielded a set of
357 * ofpacts that can be translated directly, so it is not much of a special
358 * case at that point.
359 */
360 bool freezing;
361 bool recirc_update_dp_hash; /* Generated recirculation will be preceded
362 * by datapath HASH action to get an updated
363 * dp_hash after recirculation. */
364 uint32_t dp_hash_alg;
365 uint32_t dp_hash_basis;
366 struct ofpbuf frozen_actions;
367 const struct ofpact_controller *pause;
368
369 /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
370 * This is a trigger for recirculation in cases where translating an action
371 * or looking up a flow requires access to the fields of the packet after
372 * the MPLS label stack that was originally present. */
373 bool was_mpls;
374
375 /* True if conntrack has been performed on this packet during processing
376 * on the current bridge. This is used to determine whether conntrack
377 * state from the datapath should be honored after thawing. */
378 bool conntracked;
379
380 /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
381 struct ofpact_nat *ct_nat_action;
382
383 /* OpenFlow 1.1+ action set.
384 *
385 * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
386 * When translation is otherwise complete, ofpacts_execute_action_set()
387 * converts it to a set of "struct ofpact"s that can be translated into
388 * datapath actions. */
389 bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
390 struct ofpbuf action_set; /* Action set. */
391
392 enum xlate_error error; /* Translation failed. */
393 };
394
395 /* Structure to track VLAN manipulation */
396 struct xvlan_single {
397 uint16_t tpid;
398 uint16_t vid;
399 uint16_t pcp;
400 };
401
402 struct xvlan {
403 struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
404 };
405
406 const char *xlate_strerror(enum xlate_error error)
407 {
408 switch (error) {
409 case XLATE_OK:
410 return "OK";
411 case XLATE_BRIDGE_NOT_FOUND:
412 return "Bridge not found";
413 case XLATE_RECURSION_TOO_DEEP:
414 return "Recursion too deep";
415 case XLATE_TOO_MANY_RESUBMITS:
416 return "Too many resubmits";
417 case XLATE_STACK_TOO_DEEP:
418 return "Stack too deep";
419 case XLATE_NO_RECIRCULATION_CONTEXT:
420 return "No recirculation context";
421 case XLATE_RECIRCULATION_CONFLICT:
422 return "Recirculation conflict";
423 case XLATE_TOO_MANY_MPLS_LABELS:
424 return "Too many MPLS labels";
425 case XLATE_INVALID_TUNNEL_METADATA:
426 return "Invalid tunnel metadata";
427 case XLATE_UNSUPPORTED_PACKET_TYPE:
428 return "Unsupported packet type";
429 }
430 return "Unknown error";
431 }
432
433 static void xlate_action_set(struct xlate_ctx *ctx);
434 static void xlate_commit_actions(struct xlate_ctx *ctx);
435
436 static void
437 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
438 struct xport *out_dev);
439
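/* Marks the translation for freezing: sets 'exit' so that no further actions
 * are processed and 'freezing' so that callers checkpoint the remaining work
 * (see "Freezing Translation" in struct xlate_ctx above). */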
440 static void
441 ctx_trigger_freeze(struct xlate_ctx *ctx)
442 {
443 ctx->exit = true;
444 ctx->freezing = true;
445 }
446
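/* Like ctx_trigger_freeze(), but additionally records that the generated
 * recirculation should be preceded by a datapath HASH action using algorithm
 * 'type' and basis 'basis', so that dp_hash is refreshed before
 * recirculation. */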
447 static void
448 ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
449 uint32_t basis)
450 {
451 ctx->exit = true;
452 ctx->freezing = true;
453 ctx->recirc_update_dp_hash = true;
454 ctx->dp_hash_alg = type;
455 ctx->dp_hash_basis = basis;
456 }
457
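/* Returns true if nothing has been added to 'frozen_actions' yet, that is,
 * the next action recorded will be the first one. */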
458 static bool
459 ctx_first_frozen_action(const struct xlate_ctx *ctx)
460 {
461 return !ctx->frozen_actions.size;
462 }
463
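/* Abandons an in-progress freeze, discarding any frozen actions accumulated
 * so far. */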
464 static void
465 ctx_cancel_freeze(struct xlate_ctx *ctx)
466 {
467 if (ctx->freezing) {
468 ctx->freezing = false;
469 ctx->recirc_update_dp_hash = false;
470 ofpbuf_clear(&ctx->frozen_actions);
471 ctx->frozen_actions.header = NULL;
472 }
473 }
474
475 static void finish_freezing(struct xlate_ctx *ctx);
476
477 /* A controller may use OFPP_NONE as the ingress port to indicate that
478  * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
479  * when an input bundle is needed for validation (e.g., mirroring or
480  * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does
481  * not have any 'port' structs, so care must be taken when dealing with it. */
482 static struct xbundle ofpp_none_bundle = {
483 .name = "OFPP_NONE",
484 .vlan_mode = PORT_VLAN_TRUNK
485 };
486
487 /* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
488 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
489 * traffic egressing the 'ofport' with that priority should be marked with. */
490 struct skb_priority_to_dscp {
491 struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
492 uint32_t skb_priority; /* Priority of this queue (see struct flow). */
493
494 uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
495 };
496
497 /* Xlate config contains hash maps of all bridges, bundles and ports.
498 * Xcfgp contains the pointer to the current xlate configuration.
499 * When the main thread needs to change the configuration, it copies xcfgp to
500 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
501 * does not block handler and revalidator threads. */
502 struct xlate_cfg {
503 struct hmap xbridges;
504 struct hmap xbundles;
505 struct hmap xports;
506 };
507 static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
508 static struct xlate_cfg *new_xcfg = NULL;
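/* The current configuration is obtained with
 * ovsrcu_get(struct xlate_cfg *, &xcfgp); see xlate_lookup_ofproto_() below
 * for a typical read-side lookup. */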
509
510 typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
511 struct xlate_ctx *, bool);
512
513 static bool may_receive(const struct xport *, struct xlate_ctx *);
514 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
515 struct xlate_ctx *, bool);
516 static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
517 struct xlate_ctx *, bool);
518 static void xlate_normal(struct xlate_ctx *);
519 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
520 uint8_t table_id, bool may_packet_in,
521 bool honor_table_miss, bool with_ct_orig,
522 bool is_last_action, xlate_actions_handler *);
523
524 static bool input_vid_is_valid(const struct xlate_ctx *,
525 uint16_t vid, struct xbundle *);
526 static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
527 static void xvlan_pop(struct xvlan *src);
528 static void xvlan_push_uninit(struct xvlan *src);
529 static void xvlan_extract(const struct flow *, struct xvlan *);
530 static void xvlan_put(struct flow *, const struct xvlan *);
531 static void xvlan_input_translate(const struct xbundle *,
532 const struct xvlan *in,
533 struct xvlan *xvlan);
534 static void xvlan_output_translate(const struct xbundle *,
535 const struct xvlan *xvlan,
536 struct xvlan *out);
537 static void output_normal(struct xlate_ctx *, const struct xbundle *,
538 const struct xvlan *);
539
540 /* Optional bond recirculation parameter to compose_output_action(). */
541 struct xlate_bond_recirc {
542 uint32_t recirc_id; /* !0 Use recirculation instead of output. */
543 uint8_t hash_alg; /* !0 Compute hash for recirc before. */
544 uint32_t hash_basis; /* Compute hash for recirc before. */
545 };
546
547 static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
548 const struct xlate_bond_recirc *xr,
549 bool is_last_action, bool truncate);
550
551 static struct xbridge *xbridge_lookup(struct xlate_cfg *,
552 const struct ofproto_dpif *);
553 static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
554 const struct uuid *);
555 static struct xbundle *xbundle_lookup(struct xlate_cfg *,
556 const struct ofbundle *);
557 static struct xport *xport_lookup(struct xlate_cfg *,
558 const struct ofport_dpif *);
559 static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
560 static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
561 uint32_t skb_priority);
562 static void clear_skb_priorities(struct xport *);
563 static size_t count_skb_priorities(const struct xport *);
564 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
565 uint8_t *dscp);
566
567 static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
568 static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
569 static void xlate_xport_init(struct xlate_cfg *, struct xport *);
570 static void xlate_xbridge_set(struct xbridge *, struct dpif *,
571 const struct mac_learning *, struct stp *,
572 struct rstp *, const struct mcast_snooping *,
573 const struct mbridge *,
574 const struct dpif_sflow *,
575 const struct dpif_ipfix *,
576 const struct netflow *,
577 bool forward_bpdu, bool has_in_band,
578 const struct dpif_backer_support *);
579 static void xlate_xbundle_set(struct xbundle *xbundle,
580 enum port_vlan_mode vlan_mode,
581 uint16_t qinq_ethtype, int vlan,
582 unsigned long *trunks, unsigned long *cvlans,
583 bool use_priority_tags,
584 const struct bond *bond, const struct lacp *lacp,
585 bool floodable, bool protected);
586 static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
587 const struct netdev *netdev, const struct cfm *cfm,
588 const struct bfd *bfd, const struct lldp *lldp,
589 int stp_port_no, const struct rstp_port *rstp_port,
590 enum ofputil_port_config config,
591 enum ofputil_port_state state, bool is_tunnel,
592 bool may_enable);
593 static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
594 static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
595 static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
596 static void xlate_xbridge_copy(struct xbridge *);
597 static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
598 static void xlate_xport_copy(struct xbridge *, struct xbundle *,
599 struct xport *);
600 static void xlate_xcfg_free(struct xlate_cfg *);
601 \f
602 /* Tracing helpers. */
603
604 /* If tracing is enabled in 'ctx', creates a new trace node and appends it to
605 * the list of nodes maintained in ctx->xin. The new node has type 'type' and
606 * its text is created from 'format' by treating it as a printf format string.
607 * Returns the list of nodes embedded within the new trace node; ordinarily,
608  * the caller can ignore this, but it is useful if the caller needs to nest
609 * more trace nodes within the new node.
610 *
611 * If tracing is not enabled, does nothing and returns NULL. */
612 static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
613 xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
614 const char *format, ...)
615 {
616 struct ovs_list *subtrace = NULL;
617 if (OVS_UNLIKELY(ctx->xin->trace)) {
618 va_list args;
619 va_start(args, format);
620 char *text = xvasprintf(format, args);
621 subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
622 va_end(args);
623 free(text);
624 }
625 return subtrace;
626 }
627
628 /* This is like xlate_report() for errors that are serious enough that we
629 * should log them even if we are not tracing. */
630 static void OVS_PRINTF_FORMAT(2, 3)
631 xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
632 {
633 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
634 if (!OVS_UNLIKELY(ctx->xin->trace)
635 && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
636 return;
637 }
638
639 struct ds s = DS_EMPTY_INITIALIZER;
640 va_list args;
641 va_start(args, format);
642 ds_put_format_valist(&s, format, args);
643 va_end(args);
644
645 if (ctx->xin->trace) {
646 oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
647 } else {
648 ds_put_cstr(&s, " while processing ");
649 flow_format(&s, &ctx->base_flow, NULL);
650 ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
651 VLOG_WARN("%s", ds_cstr(&s));
652 }
653 ds_destroy(&s);
654 }
655
656 /* This is like xlate_report() for messages that should be logged at debug
657 * level (even if we are not tracing) because they can be valuable for
658 * debugging. */
659 static void OVS_PRINTF_FORMAT(3, 4)
660 xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
661 const char *format, ...)
662 {
663 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
664 if (!OVS_UNLIKELY(ctx->xin->trace)
665 && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
666 return;
667 }
668
669 struct ds s = DS_EMPTY_INITIALIZER;
670 va_list args;
671 va_start(args, format);
672 ds_put_format_valist(&s, format, args);
673 va_end(args);
674
675 if (ctx->xin->trace) {
676 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
677 } else {
678 VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
679 }
680 ds_destroy(&s);
681 }
682
683 /* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
684 * trace, whose text is 'title' followed by a formatted version of the
685 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
686 *
687 * If tracing is not enabled, does nothing. */
688 static void
689 xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
690 const char *title,
691 const struct ofpact *ofpacts, size_t ofpacts_len)
692 {
693 if (OVS_UNLIKELY(ctx->xin->trace)) {
694 struct ds s = DS_EMPTY_INITIALIZER;
695 ds_put_format(&s, "%s: ", title);
696 ofpacts_format(ofpacts, ofpacts_len, NULL, &s);
697 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
698 ds_destroy(&s);
699 }
700 }
701
702 /* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
703  * trace, whose message is a formatted version of the OpenFlow action set.
704 * 'verb' should be "was" or "is", depending on whether the action set reported
705 * is the new action set or the old one.
706 *
707 * If tracing is not enabled, does nothing. */
708 static void
709 xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
710 {
711 if (OVS_UNLIKELY(ctx->xin->trace)) {
712 struct ofpbuf action_list;
713 ofpbuf_init(&action_list, 0);
714 ofpacts_execute_action_set(&action_list, &ctx->action_set);
715 if (action_list.size) {
716 struct ds s = DS_EMPTY_INITIALIZER;
717 ofpacts_format(action_list.data, action_list.size, NULL, &s);
718 xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
719 verb, ds_cstr(&s));
720 ds_destroy(&s);
721 } else {
722 xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
723 }
724 ofpbuf_uninit(&action_list);
725 }
726 }
727
728
729 /* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
730 * OpenFlow table 'table_id') to the trace and makes this node the parent for
731 * future trace nodes. The caller should save ctx->xin->trace before calling
732 * this function, then after tracing all of the activities under the table,
733 * restore its previous value.
734 *
735 * If tracing is not enabled, does nothing. */
736 static void
737 xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
738 uint8_t table_id)
739 {
740 if (OVS_LIKELY(!ctx->xin->trace)) {
741 return;
742 }
743
744 struct ds s = DS_EMPTY_INITIALIZER;
745 ds_put_format(&s, "%2d. ", table_id);
746 if (rule == ctx->xin->ofproto->miss_rule) {
747 ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
748 } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
749 ds_put_cstr(&s, "No match.");
750 } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
751 ds_put_cstr(&s, "Packets are IP fragments and "
752 "the fragment handling mode is \"drop\".");
753 } else {
754 minimatch_format(&rule->up.cr.match,
755 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
756 NULL, &s, OFP_DEFAULT_PRIORITY);
757 if (ds_last(&s) != ' ') {
758 ds_put_cstr(&s, ", ");
759 }
760 ds_put_format(&s, "priority %d", rule->up.cr.priority);
761 if (rule->up.flow_cookie) {
762 ds_put_format(&s, ", cookie %#"PRIx64,
763 ntohll(rule->up.flow_cookie));
764 }
765 }
766 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
767 ds_cstr(&s))->subs;
768 ds_destroy(&s);
769 }
770
771 /* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
772 * reporting the value of subfield 'sf'.
773 *
774 * If tracing is not enabled, does nothing. */
775 static void
776 xlate_report_subfield(const struct xlate_ctx *ctx,
777 const struct mf_subfield *sf)
778 {
779 if (OVS_UNLIKELY(ctx->xin->trace)) {
780 struct ds s = DS_EMPTY_INITIALIZER;
781 mf_format_subfield(sf, &s);
782 ds_put_cstr(&s, " is now ");
783
784 if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
785 union mf_value value;
786 mf_get_value(sf->field, &ctx->xin->flow, &value);
787 mf_format(sf->field, &value, NULL, NULL, &s);
788 } else {
789 union mf_subvalue cst;
790 mf_read_subfield(sf, &ctx->xin->flow, &cst);
791 ds_put_hex(&s, &cst, sizeof cst);
792 }
793
794 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
795
796 ds_destroy(&s);
797 }
798 }
799 \f
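/* Adds 'xbridge' to 'xcfg''s hash map of bridges, keyed on its ofproto
 * pointer, and initializes its lists of bundles and ports. */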
800 static void
801 xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
802 {
803 ovs_list_init(&xbridge->xbundles);
804 hmap_init(&xbridge->xports);
805 hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
806 hash_pointer(xbridge->ofproto, 0));
807 }
808
809 static void
810 xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
811 {
812 ovs_list_init(&xbundle->xports);
813 ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
814 hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
815 hash_pointer(xbundle->ofbundle, 0));
816 }
817
818 static void
819 xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
820 {
821 hmap_init(&xport->skb_priorities);
822 hmap_insert(&xcfg->xports, &xport->hmap_node,
823 hash_pointer(xport->ofport, 0));
824 hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
825 hash_ofp_port(xport->ofp_port));
826 }
827
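/* Updates 'xbridge''s configuration from the given arguments, taking new
 * references to the auxiliary modules (MAC learning, multicast snooping,
 * mirroring, sFlow, IPFIX, NetFlow, STP, RSTP) and releasing the old ones
 * where they changed. */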
828 static void
829 xlate_xbridge_set(struct xbridge *xbridge,
830 struct dpif *dpif,
831 const struct mac_learning *ml, struct stp *stp,
832 struct rstp *rstp, const struct mcast_snooping *ms,
833 const struct mbridge *mbridge,
834 const struct dpif_sflow *sflow,
835 const struct dpif_ipfix *ipfix,
836 const struct netflow *netflow,
837 bool forward_bpdu, bool has_in_band,
838 const struct dpif_backer_support *support)
839 {
840 if (xbridge->ml != ml) {
841 mac_learning_unref(xbridge->ml);
842 xbridge->ml = mac_learning_ref(ml);
843 }
844
845 if (xbridge->ms != ms) {
846 mcast_snooping_unref(xbridge->ms);
847 xbridge->ms = mcast_snooping_ref(ms);
848 }
849
850 if (xbridge->mbridge != mbridge) {
851 mbridge_unref(xbridge->mbridge);
852 xbridge->mbridge = mbridge_ref(mbridge);
853 }
854
855 if (xbridge->sflow != sflow) {
856 dpif_sflow_unref(xbridge->sflow);
857 xbridge->sflow = dpif_sflow_ref(sflow);
858 }
859
860 if (xbridge->ipfix != ipfix) {
861 dpif_ipfix_unref(xbridge->ipfix);
862 xbridge->ipfix = dpif_ipfix_ref(ipfix);
863 }
864
865 if (xbridge->stp != stp) {
866 stp_unref(xbridge->stp);
867 xbridge->stp = stp_ref(stp);
868 }
869
870 if (xbridge->rstp != rstp) {
871 rstp_unref(xbridge->rstp);
872 xbridge->rstp = rstp_ref(rstp);
873 }
874
875 if (xbridge->netflow != netflow) {
876 netflow_unref(xbridge->netflow);
877 xbridge->netflow = netflow_ref(netflow);
878 }
879
880 xbridge->dpif = dpif;
881 xbridge->forward_bpdu = forward_bpdu;
882 xbridge->has_in_band = has_in_band;
883 xbridge->support = *support;
884 }
885
886 static void
887 xlate_xbundle_set(struct xbundle *xbundle,
888 enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
889 int vlan, unsigned long *trunks, unsigned long *cvlans,
890 bool use_priority_tags,
891 const struct bond *bond, const struct lacp *lacp,
892 bool floodable, bool protected)
893 {
894 ovs_assert(xbundle->xbridge);
895
896 xbundle->vlan_mode = vlan_mode;
897 xbundle->qinq_ethtype = qinq_ethtype;
898 xbundle->vlan = vlan;
899 xbundle->trunks = trunks;
900 xbundle->cvlans = cvlans;
901 xbundle->use_priority_tags = use_priority_tags;
902 xbundle->floodable = floodable;
903 xbundle->protected = protected;
904
905 if (xbundle->bond != bond) {
906 bond_unref(xbundle->bond);
907 xbundle->bond = bond_ref(bond);
908 }
909
910 if (xbundle->lacp != lacp) {
911 lacp_unref(xbundle->lacp);
912 xbundle->lacp = lacp_ref(lacp);
913 }
914 }
915
916 static void
917 xlate_xport_set(struct xport *xport, odp_port_t odp_port,
918 const struct netdev *netdev, const struct cfm *cfm,
919 const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
920 const struct rstp_port* rstp_port,
921 enum ofputil_port_config config, enum ofputil_port_state state,
922 bool is_tunnel, bool may_enable)
923 {
924 xport->config = config;
925 xport->state = state;
926 xport->stp_port_no = stp_port_no;
927 xport->is_tunnel = is_tunnel;
928 xport->pt_mode = netdev_get_pt_mode(netdev);
929 xport->may_enable = may_enable;
930 xport->odp_port = odp_port;
931
932 if (xport->rstp_port != rstp_port) {
933 rstp_port_unref(xport->rstp_port);
934 xport->rstp_port = rstp_port_ref(rstp_port);
935 }
936
937 if (xport->cfm != cfm) {
938 cfm_unref(xport->cfm);
939 xport->cfm = cfm_ref(cfm);
940 }
941
942 if (xport->bfd != bfd) {
943 bfd_unref(xport->bfd);
944 xport->bfd = bfd_ref(bfd);
945 }
946
947 if (xport->lldp != lldp) {
948 lldp_unref(xport->lldp);
949 xport->lldp = lldp_ref(lldp);
950 }
951
952 if (xport->netdev != netdev) {
953 netdev_close(xport->netdev);
954 xport->netdev = netdev_ref(netdev);
955 }
956 }
957
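/* Copies 'xbridge', along with all of its bundles and ports, into the new
 * configuration 'new_xcfg'. */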
958 static void
959 xlate_xbridge_copy(struct xbridge *xbridge)
960 {
961 struct xbundle *xbundle;
962 struct xport *xport;
963 struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
964 new_xbridge->ofproto = xbridge->ofproto;
965 new_xbridge->name = xstrdup(xbridge->name);
966 xlate_xbridge_init(new_xcfg, new_xbridge);
967
968 xlate_xbridge_set(new_xbridge,
969 xbridge->dpif, xbridge->ml, xbridge->stp,
970 xbridge->rstp, xbridge->ms, xbridge->mbridge,
971 xbridge->sflow, xbridge->ipfix, xbridge->netflow,
972 xbridge->forward_bpdu, xbridge->has_in_band,
973 &xbridge->support);
974 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
975 xlate_xbundle_copy(new_xbridge, xbundle);
976 }
977
978 /* Copy xports which are not part of a xbundle */
979 HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
980 if (!xport->xbundle) {
981 xlate_xport_copy(new_xbridge, NULL, xport);
982 }
983 }
984 }
985
986 static void
987 xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
988 {
989 struct xport *xport;
990 struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
991 new_xbundle->ofbundle = xbundle->ofbundle;
992 new_xbundle->xbridge = xbridge;
993 new_xbundle->name = xstrdup(xbundle->name);
994 xlate_xbundle_init(new_xcfg, new_xbundle);
995
996 xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
997 xbundle->vlan, xbundle->trunks, xbundle->cvlans,
998 xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
999 xbundle->floodable, xbundle->protected);
1000 LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
1001 xlate_xport_copy(xbridge, new_xbundle, xport);
1002 }
1003 }
1004
1005 static void
1006 xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
1007 struct xport *xport)
1008 {
1009 struct skb_priority_to_dscp *pdscp, *new_pdscp;
1010 struct xport *new_xport = xzalloc(sizeof *xport);
1011 new_xport->ofport = xport->ofport;
1012 new_xport->ofp_port = xport->ofp_port;
1013 new_xport->xbridge = xbridge;
1014 xlate_xport_init(new_xcfg, new_xport);
1015
1016 xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
1017 xport->bfd, xport->lldp, xport->stp_port_no,
1018 xport->rstp_port, xport->config, xport->state,
1019 xport->is_tunnel, xport->may_enable);
1020
1021 if (xport->peer) {
1022 struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
1023 if (peer) {
1024 new_xport->peer = peer;
1025 new_xport->peer->peer = new_xport;
1026 }
1027 }
1028
1029 if (xbundle) {
1030 new_xport->xbundle = xbundle;
1031 ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
1032 }
1033
1034 HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
1035 new_pdscp = xmalloc(sizeof *pdscp);
1036 new_pdscp->skb_priority = pdscp->skb_priority;
1037 new_pdscp->dscp = pdscp->dscp;
1038 hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
1039 hash_int(new_pdscp->skb_priority, 0));
1040 }
1041 }
1042
1043 /* Sets the current xlate configuration to new_xcfg and frees the old xlate
1044 * configuration in xcfgp.
1045 *
1046 * This needs to be called after editing the xlate configuration.
1047 *
1048 * Functions that edit the new xlate configuration are
1049 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
1050 *
1051 * A sample workflow:
1052 *
1053 * xlate_txn_start();
1054 * ...
1055 * edit_xlate_configuration();
1056 * ...
1057 * xlate_txn_commit(); */
1058 void
1059 xlate_txn_commit(void)
1060 {
1061 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1062
1063 ovsrcu_set(&xcfgp, new_xcfg);
1064 ovsrcu_synchronize();
1065 xlate_xcfg_free(xcfg);
1066 new_xcfg = NULL;
1067 }
1068
1069 /* Copies the current xlate configuration in xcfgp to new_xcfg.
1070 *
1071 * This needs to be called prior to editing the xlate configuration. */
1072 void
1073 xlate_txn_start(void)
1074 {
1075 struct xbridge *xbridge;
1076 struct xlate_cfg *xcfg;
1077
1078 ovs_assert(!new_xcfg);
1079
1080 new_xcfg = xmalloc(sizeof *new_xcfg);
1081 hmap_init(&new_xcfg->xbridges);
1082 hmap_init(&new_xcfg->xbundles);
1083 hmap_init(&new_xcfg->xports);
1084
1085 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1086 if (!xcfg) {
1087 return;
1088 }
1089
1090 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1091 xlate_xbridge_copy(xbridge);
1092 }
1093 }
1094
1095
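/* Frees 'xcfg' together with all of the bridges, bundles, and ports that it
 * contains. */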
1096 static void
1097 xlate_xcfg_free(struct xlate_cfg *xcfg)
1098 {
1099 struct xbridge *xbridge, *next_xbridge;
1100
1101 if (!xcfg) {
1102 return;
1103 }
1104
1105 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1106 xlate_xbridge_remove(xcfg, xbridge);
1107 }
1108
1109 hmap_destroy(&xcfg->xbridges);
1110 hmap_destroy(&xcfg->xbundles);
1111 hmap_destroy(&xcfg->xports);
1112 free(xcfg);
1113 }
1114
1115 void
1116 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
1117 struct dpif *dpif,
1118 const struct mac_learning *ml, struct stp *stp,
1119 struct rstp *rstp, const struct mcast_snooping *ms,
1120 const struct mbridge *mbridge,
1121 const struct dpif_sflow *sflow,
1122 const struct dpif_ipfix *ipfix,
1123 const struct netflow *netflow,
1124 bool forward_bpdu, bool has_in_band,
1125 const struct dpif_backer_support *support)
1126 {
1127 struct xbridge *xbridge;
1128
1129 ovs_assert(new_xcfg);
1130
1131 xbridge = xbridge_lookup(new_xcfg, ofproto);
1132 if (!xbridge) {
1133 xbridge = xzalloc(sizeof *xbridge);
1134 xbridge->ofproto = ofproto;
1135
1136 xlate_xbridge_init(new_xcfg, xbridge);
1137 }
1138
1139 free(xbridge->name);
1140 xbridge->name = xstrdup(name);
1141
1142 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
1143 netflow, forward_bpdu, has_in_band, support);
1144 }
1145
1146 static void
1147 xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
1148 {
1149 struct xbundle *xbundle, *next_xbundle;
1150 struct xport *xport, *next_xport;
1151
1152 if (!xbridge) {
1153 return;
1154 }
1155
1156 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
1157 xlate_xport_remove(xcfg, xport);
1158 }
1159
1160 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
1161 xlate_xbundle_remove(xcfg, xbundle);
1162 }
1163
1164 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
1165 mac_learning_unref(xbridge->ml);
1166 mcast_snooping_unref(xbridge->ms);
1167 mbridge_unref(xbridge->mbridge);
1168 dpif_sflow_unref(xbridge->sflow);
1169 dpif_ipfix_unref(xbridge->ipfix);
1170 netflow_unref(xbridge->netflow);
1171 stp_unref(xbridge->stp);
1172 rstp_unref(xbridge->rstp);
1173 hmap_destroy(&xbridge->xports);
1174 free(xbridge->name);
1175 free(xbridge);
1176 }
1177
1178 void
1179 xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1180 {
1181 struct xbridge *xbridge;
1182
1183 ovs_assert(new_xcfg);
1184
1185 xbridge = xbridge_lookup(new_xcfg, ofproto);
1186 xlate_xbridge_remove(new_xcfg, xbridge);
1187 }
1188
1189 void
1190 xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1191 const char *name, enum port_vlan_mode vlan_mode,
1192 uint16_t qinq_ethtype, int vlan,
1193 unsigned long *trunks, unsigned long *cvlans,
1194 bool use_priority_tags,
1195 const struct bond *bond, const struct lacp *lacp,
1196 bool floodable, bool protected)
1197 {
1198 struct xbundle *xbundle;
1199
1200 ovs_assert(new_xcfg);
1201
1202 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1203 if (!xbundle) {
1204 xbundle = xzalloc(sizeof *xbundle);
1205 xbundle->ofbundle = ofbundle;
1206 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
1207
1208 xlate_xbundle_init(new_xcfg, xbundle);
1209 }
1210
1211 free(xbundle->name);
1212 xbundle->name = xstrdup(name);
1213
1214 xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
1215 use_priority_tags, bond, lacp, floodable, protected);
1216 }
1217
1218 static void
1219 xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
1220 {
1221 struct xport *xport;
1222
1223 if (!xbundle) {
1224 return;
1225 }
1226
1227 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
1228 xport->xbundle = NULL;
1229 }
1230
1231 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
1232 ovs_list_remove(&xbundle->list_node);
1233 bond_unref(xbundle->bond);
1234 lacp_unref(xbundle->lacp);
1235 free(xbundle->name);
1236 free(xbundle);
1237 }
1238
1239 void
1240 xlate_bundle_remove(struct ofbundle *ofbundle)
1241 {
1242 struct xbundle *xbundle;
1243
1244 ovs_assert(new_xcfg);
1245
1246 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1247 xlate_xbundle_remove(new_xcfg, xbundle);
1248 }
1249
1250 void
1251 xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1252 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1253 odp_port_t odp_port, const struct netdev *netdev,
1254 const struct cfm *cfm, const struct bfd *bfd,
1255 const struct lldp *lldp, struct ofport_dpif *peer,
1256 int stp_port_no, const struct rstp_port *rstp_port,
1257 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
1258 enum ofputil_port_config config,
1259 enum ofputil_port_state state, bool is_tunnel,
1260 bool may_enable)
1261 {
1262 size_t i;
1263 struct xport *xport;
1264
1265 ovs_assert(new_xcfg);
1266
1267 xport = xport_lookup(new_xcfg, ofport);
1268 if (!xport) {
1269 xport = xzalloc(sizeof *xport);
1270 xport->ofport = ofport;
1271 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
1272 xport->ofp_port = ofp_port;
1273
1274 xlate_xport_init(new_xcfg, xport);
1275 }
1276
1277 ovs_assert(xport->ofp_port == ofp_port);
1278
1279 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1280 stp_port_no, rstp_port, config, state, is_tunnel,
1281 may_enable);
1282
1283 if (xport->peer) {
1284 xport->peer->peer = NULL;
1285 }
1286 xport->peer = xport_lookup(new_xcfg, peer);
1287 if (xport->peer) {
1288 xport->peer->peer = xport;
1289 }
1290
1291 if (xport->xbundle) {
1292 ovs_list_remove(&xport->bundle_node);
1293 }
1294 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
1295 if (xport->xbundle) {
1296 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
1297 }
1298
1299 clear_skb_priorities(xport);
1300 for (i = 0; i < n_qdscp; i++) {
1301 struct skb_priority_to_dscp *pdscp;
1302 uint32_t skb_priority;
1303
1304 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1305 &skb_priority)) {
1306 continue;
1307 }
1308
1309 pdscp = xmalloc(sizeof *pdscp);
1310 pdscp->skb_priority = skb_priority;
1311 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1312 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1313 hash_int(pdscp->skb_priority, 0));
1314 }
1315 }
1316
1317 static void
1318 xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
1319 {
1320 if (!xport) {
1321 return;
1322 }
1323
1324 if (xport->peer) {
1325 xport->peer->peer = NULL;
1326 xport->peer = NULL;
1327 }
1328
1329 if (xport->xbundle) {
1330 ovs_list_remove(&xport->bundle_node);
1331 }
1332
1333 clear_skb_priorities(xport);
1334 hmap_destroy(&xport->skb_priorities);
1335
1336 hmap_remove(&xcfg->xports, &xport->hmap_node);
1337 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1338
1339 netdev_close(xport->netdev);
1340 rstp_port_unref(xport->rstp_port);
1341 cfm_unref(xport->cfm);
1342 bfd_unref(xport->bfd);
1343 lldp_unref(xport->lldp);
1344 free(xport);
1345 }
1346
1347 void
1348 xlate_ofport_remove(struct ofport_dpif *ofport)
1349 {
1350 struct xport *xport;
1351
1352 ovs_assert(new_xcfg);
1353
1354 xport = xport_lookup(new_xcfg, ofport);
1355 xlate_xport_remove(new_xcfg, xport);
1356 }
1357
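/* Looks up the xport on which 'flow' was received, either via its tunnel or
 * via the datapath in_port, and returns the owning bridge's ofproto.  On
 * success stores the xport in '*xportp' and, if 'ofp_in_port' is nonnull, the
 * OpenFlow port number in '*ofp_in_port'; returns NULL if no port matches. */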
1358 static struct ofproto_dpif *
1359 xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
1360 ofp_port_t *ofp_in_port, const struct xport **xportp)
1361 {
1362 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1363 const struct xport *xport;
1364
1365 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1366 ? tnl_port_receive(flow)
1367 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1368 if (OVS_UNLIKELY(!xport)) {
1369 return NULL;
1370 }
1371 *xportp = xport;
1372 if (ofp_in_port) {
1373 *ofp_in_port = xport->ofp_port;
1374 }
1375 return xport->xbridge->ofproto;
1376 }
1377
1378 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1379 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1380 struct ofproto_dpif *
1381 xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1382 ofp_port_t *ofp_in_port)
1383 {
1384 const struct xport *xport;
1385
1386 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1387 }
1388
1389 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1390 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
1391 * openflow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
1392 * handles for those protocols if they're enabled. Caller may use the returned
1393 * pointers until quiescing, for longer term use additional references must
1394 * be taken.
1395 *
1396 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
1397 */
1398 int
1399 xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1400 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1401 struct dpif_sflow **sflow, struct netflow **netflow,
1402 ofp_port_t *ofp_in_port)
1403 {
1404 struct ofproto_dpif *ofproto;
1405 const struct xport *xport;
1406
1407 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1408
1409 if (!ofproto) {
1410 return ENODEV;
1411 }
1412
1413 if (ofprotop) {
1414 *ofprotop = ofproto;
1415 }
1416
1417 if (ipfix) {
1418 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1419 }
1420
1421 if (sflow) {
1422 *sflow = xport ? xport->xbridge->sflow : NULL;
1423 }
1424
1425 if (netflow) {
1426 *netflow = xport ? xport->xbridge->netflow : NULL;
1427 }
1428
1429 return 0;
1430 }
1431
1432 static struct xbridge *
1433 xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
1434 {
1435 struct hmap *xbridges;
1436 struct xbridge *xbridge;
1437
1438 if (!ofproto || !xcfg) {
1439 return NULL;
1440 }
1441
1442 xbridges = &xcfg->xbridges;
1443
1444 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
1445 xbridges) {
1446 if (xbridge->ofproto == ofproto) {
1447 return xbridge;
1448 }
1449 }
1450 return NULL;
1451 }
1452
1453 static struct xbridge *
1454 xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1455 {
1456 struct xbridge *xbridge;
1457
1458 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1459 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
1460 return xbridge;
1461 }
1462 }
1463 return NULL;
1464 }
1465
1466 static struct xbundle *
1467 xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
1468 {
1469 struct hmap *xbundles;
1470 struct xbundle *xbundle;
1471
1472 if (!ofbundle || !xcfg) {
1473 return NULL;
1474 }
1475
1476 xbundles = &xcfg->xbundles;
1477
1478 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
1479 xbundles) {
1480 if (xbundle->ofbundle == ofbundle) {
1481 return xbundle;
1482 }
1483 }
1484 return NULL;
1485 }
1486
1487 static struct xport *
1488 xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
1489 {
1490 struct hmap *xports;
1491 struct xport *xport;
1492
1493 if (!ofport || !xcfg) {
1494 return NULL;
1495 }
1496
1497 xports = &xcfg->xports;
1498
1499 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
1500 xports) {
1501 if (xport->ofport == ofport) {
1502 return xport;
1503 }
1504 }
1505 return NULL;
1506 }
1507
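/* Returns the STP port corresponding to 'xport', or NULL if STP is not
 * enabled on the bridge or 'xport' has no STP port number assigned. */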
1508 static struct stp_port *
1509 xport_get_stp_port(const struct xport *xport)
1510 {
1511 return xport->xbridge->stp && xport->stp_port_no != -1
1512 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1513 : NULL;
1514 }
1515
1516 static bool
1517 xport_stp_learn_state(const struct xport *xport)
1518 {
1519 struct stp_port *sp = xport_get_stp_port(xport);
1520 return sp
1521 ? stp_learn_in_state(stp_port_get_state(sp))
1522 : true;
1523 }
1524
1525 static bool
1526 xport_stp_forward_state(const struct xport *xport)
1527 {
1528 struct stp_port *sp = xport_get_stp_port(xport);
1529 return sp
1530 ? stp_forward_in_state(stp_port_get_state(sp))
1531 : true;
1532 }
1533
1534 static bool
1535 xport_stp_should_forward_bpdu(const struct xport *xport)
1536 {
1537 struct stp_port *sp = xport_get_stp_port(xport);
1538 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
1539 }
1540
1541 /* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1542  * were used to make the determination. */
1543 static bool
1544 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1545 {
1546 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
1547 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1548 return is_stp(flow);
1549 }
1550
1551 static void
1552 stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1553 {
1554 struct stp_port *sp = xport_get_stp_port(xport);
1555 struct dp_packet payload = *packet;
1556 struct eth_header *eth = dp_packet_data(&payload);
1557
1558 /* Sink packets on ports that have STP disabled when the bridge has
1559 * STP enabled. */
1560 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1561 return;
1562 }
1563
1564 /* Trim off padding on payload. */
1565 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1566 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1567 }
1568
1569 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1570 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
1571 }
1572 }
1573
1574 static enum rstp_state
1575 xport_get_rstp_port_state(const struct xport *xport)
1576 {
1577 return xport->rstp_port
1578 ? rstp_port_get_state(xport->rstp_port)
1579 : RSTP_DISABLED;
1580 }
1581
1582 static bool
1583 xport_rstp_learn_state(const struct xport *xport)
1584 {
1585 return xport->xbridge->rstp && xport->rstp_port
1586 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1587 : true;
1588 }
1589
1590 static bool
1591 xport_rstp_forward_state(const struct xport *xport)
1592 {
1593 return xport->xbridge->rstp && xport->rstp_port
1594 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1595 : true;
1596 }
1597
1598 static bool
1599 xport_rstp_should_manage_bpdu(const struct xport *xport)
1600 {
1601 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
1602 }
1603
1604 static void
1605 rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1606 {
1607 struct dp_packet payload = *packet;
1608 struct eth_header *eth = dp_packet_data(&payload);
1609
1610 /* Sink packets on ports that have no RSTP. */
1611 if (!xport->rstp_port) {
1612 return;
1613 }
1614
1615 /* Trim off padding on payload. */
1616 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1617 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1618 }
1619
1620 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1621 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1622 dp_packet_size(&payload));
1623 }
1624 }
1625
1626 static struct xport *
1627 get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1628 {
1629 struct xport *xport;
1630
1631 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1632 &xbridge->xports) {
1633 if (xport->ofp_port == ofp_port) {
1634 return xport;
1635 }
1636 }
1637 return NULL;
1638 }
1639
1640 static odp_port_t
1641 ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1642 {
1643 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1644 return xport ? xport->odp_port : ODPP_NONE;
1645 }
1646
1647 static bool
1648 odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1649 {
1650 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1651 return xport && xport->may_enable;
1652 }
1653
1654 static struct ofputil_bucket *
1655 group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1656 int depth);
1657
1658 static bool
1659 group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1660 {
1661 struct group_dpif *group;
1662
1663 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1664 ctx->xin->tables_version, false);
1665 if (group) {
1666 return group_first_live_bucket(ctx, group, depth) != NULL;
1667 }
1668
1669 return false;
1670 }
1671
1672 #define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1673
1674 static bool
1675 bucket_is_alive(const struct xlate_ctx *ctx,
1676 struct ofputil_bucket *bucket, int depth)
1677 {
1678 if (depth >= MAX_LIVENESS_RECURSION) {
1679 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1680 MAX_LIVENESS_RECURSION);
1681 return false;
1682 }
1683
1684 return (!ofputil_bucket_has_liveness(bucket)
1685 || (bucket->watch_port != OFPP_ANY
1686 && odp_port_is_alive(ctx, bucket->watch_port))
1687 || (bucket->watch_group != OFPG_ANY
1688 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
1689 }
1690
1691 static struct ofputil_bucket *
1692 group_first_live_bucket(const struct xlate_ctx *ctx,
1693 const struct group_dpif *group, int depth)
1694 {
1695 struct ofputil_bucket *bucket;
1696 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1697 if (bucket_is_alive(ctx, bucket, depth)) {
1698 return bucket;
1699 }
1700 }
1701
1702 return NULL;
1703 }
1704
1705 static struct ofputil_bucket *
1706 group_best_live_bucket(const struct xlate_ctx *ctx,
1707 const struct group_dpif *group,
1708 uint32_t basis)
1709 {
1710 struct ofputil_bucket *best_bucket = NULL;
1711 uint32_t best_score = 0;
1712
1713 struct ofputil_bucket *bucket;
1714 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1715 if (bucket_is_alive(ctx, bucket, 0)) {
1716 uint32_t score =
1717 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
1718 if (score >= best_score) {
1719 best_bucket = bucket;
1720 best_score = score;
1721 }
1722 }
1723 }
1724
1725 return best_bucket;
1726 }
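
/* For illustration, the selection above boils down to this per-bucket score
 * (a sketch; 'bucket_score' is just an illustrative name, and hash_int() is
 * the same helper used in the loop above):
 *
 *     static inline uint32_t
 *     bucket_score(const struct ofputil_bucket *bucket, uint32_t basis)
 *     {
 *         return (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
 *     }
 *
 * The caller derives 'basis' from the flow, so a given flow always lands on
 * the same live bucket, while across many flows buckets win the max-score
 * comparison roughly in proportion to their weights. */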
1727
1728 static bool
1729 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
1730 {
1731 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1732 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1733 }
1734
1735 static bool
1736 xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1737 {
1738 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1739 }
1740
1741 static bool
1742 xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
1743 {
1744 switch (xbundle->vlan_mode) {
1745 case PORT_VLAN_ACCESS:
1746 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
1747
1748 case PORT_VLAN_TRUNK:
1749 case PORT_VLAN_NATIVE_UNTAGGED:
1750 case PORT_VLAN_NATIVE_TAGGED:
1751 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
1752
1753 case PORT_VLAN_DOT1Q_TUNNEL:
1754 return xvlan->v[0].vid == xbundle->vlan &&
1755 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
1756
1757 default:
1758 OVS_NOT_REACHED();
1759 }
1760 }
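
/* For example, with the modes above: an access bundle with vlan 20 includes
 * only xvlan {20, 0}; a trunk or native bundle includes any xvlan whose outer
 * VID is in its trunks bitmap (or any VID at all if no trunks are
 * configured); and a dot1q-tunnel bundle with vlan 100 includes xvlans whose
 * outer VID is 100 and whose inner VID passes xbundle_allows_cvlan(). */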
1761
1762 static mirror_mask_t
1763 xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1764 {
1765 return xbundle != &ofpp_none_bundle
1766 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
1767 : 0;
1768 }
1769
1770 static mirror_mask_t
1771 xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
1772 {
1773 return xbundle != &ofpp_none_bundle
1774 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
1775 : 0;
1776 }
1777
1778 static mirror_mask_t
1779 xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
1780 {
1781 return xbundle != &ofpp_none_bundle
1782 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
1783 : 0;
1784 }
1785
1786 static struct xbundle *
1787 lookup_input_bundle__(const struct xbridge *xbridge,
1788 ofp_port_t in_port, struct xport **in_xportp)
1789 {
1790 struct xport *xport;
1791
1792 /* Find the port and bundle for the received packet. */
1793 xport = get_ofp_port(xbridge, in_port);
1794 if (in_xportp) {
1795 *in_xportp = xport;
1796 }
1797 if (xport && xport->xbundle) {
1798 return xport->xbundle;
1799 }
1800
1801 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
1802 * which a controller may use as the ingress port for traffic that
1803 * it is sourcing. */
1804 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
1805 return &ofpp_none_bundle;
1806 }
1807 return NULL;
1808 }
1809
1810 static struct xbundle *
1811 lookup_input_bundle(const struct xlate_ctx *ctx,
1812 ofp_port_t in_port, struct xport **in_xportp)
1813 {
1814 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
1815 in_port, in_xportp);
1816 if (!xbundle) {
1817 /* Odd. A few possible reasons here:
1818 *
1819 * - We deleted a port but there are still a few packets queued up
1820 * from it.
1821 *
1822 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
1823 * we don't know about.
1824 *
1825 * - The ofproto client didn't configure the port as part of a bundle.
1826 * This is particularly likely to happen if a packet was received on
1827 * the port after it was created, but before the client had a chance
1828 * to configure its bundle.
1829 */
1830 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
1831 in_port);
1832 }
1833 return xbundle;
1834 }
1835
1836 /* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
1837 * given the packet is ingressing or egressing on 'xbundle', which has ingress
1838 * or egress (as appropriate) mirrors 'mirrors'. */
1839 static void
1840 mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
1841 mirror_mask_t mirrors)
1842 {
1843 struct xvlan in_xvlan;
1844 struct xvlan xvlan;
1845
1846 /* Figure out what VLAN the packet is in (because mirrors can select
1847 * packets on basis of VLAN). */
1848 xvlan_extract(&ctx->xin->flow, &in_xvlan);
1849 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
1850 return;
1851 }
1852 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
1853
1854 const struct xbridge *xbridge = ctx->xbridge;
1855
1856 /* Don't mirror to destinations that we've already mirrored to. */
1857 mirrors &= ~ctx->mirrors;
1858 if (!mirrors) {
1859 return;
1860 }
1861
1862 if (ctx->xin->resubmit_stats) {
1863 mirror_update_stats(xbridge->mbridge, mirrors,
1864 ctx->xin->resubmit_stats->n_packets,
1865 ctx->xin->resubmit_stats->n_bytes);
1866 }
1867 if (ctx->xin->xcache) {
1868 struct xc_entry *entry;
1869
1870 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
1871 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
1872 entry->mirror.mirrors = mirrors;
1873 }
1874
1875 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate as long as
1876 * some candidates remain. */
1877 while (mirrors) {
1878 const unsigned long *vlans;
1879 mirror_mask_t dup_mirrors;
1880 struct ofbundle *out;
1881 int out_vlan;
1882 int snaplen;
1883
1884 /* Get the details of the mirror represented by the rightmost 1-bit. */
1885 bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
1886 &vlans, &dup_mirrors,
1887 &out, &snaplen, &out_vlan);
1888 ovs_assert(has_mirror);
1889
1890
1891 /* If this mirror selects on the basis of VLAN, and it does not select
1892 * 'vlan', then discard this mirror and go on to the next one. */
1893 if (vlans) {
1894 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
1895 }
1896 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
1897 mirrors = zero_rightmost_1bit(mirrors);
1898 continue;
1899 }
1900
1901 /* Record the mirror, and the mirrors that output to the same
1902 * destination, so that we don't mirror to them again. This must be
1903 * done now to ensure that output_normal(), below, doesn't recursively
1904 * output to the same mirrors. */
1905 ctx->mirrors |= dup_mirrors;
1906 ctx->mirror_snaplen = snaplen;
1907
1908 /* Send the packet to the mirror. */
1909 if (out) {
1910 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1911 struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
1912 if (out_xbundle) {
1913 output_normal(ctx, out_xbundle, &xvlan);
1914 }
1915 } else if (xvlan.v[0].vid != out_vlan
1916 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
1917 struct xbundle *xb;
1918 uint16_t old_vid = xvlan.v[0].vid;
1919
1920 xvlan.v[0].vid = out_vlan;
1921 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
1922 if (xbundle_includes_vlan(xb, &xvlan)
1923 && !xbundle_mirror_out(xbridge, xb)) {
1924 output_normal(ctx, xb, &xvlan);
1925 }
1926 }
1927 xvlan.v[0].vid = old_vid;
1928 }
1929
1930 /* output_normal() could have recursively output (to different
1931 * mirrors), so make sure that we don't send duplicates. */
1932 mirrors &= ~ctx->mirrors;
1933 ctx->mirror_snaplen = 0;
1934 }
1935 }
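
/* The loop above visits 'mirrors' one set bit at a time.  A minimal sketch of
 * the iteration pattern (assuming the bit helpers from lib/util.h used
 * above):
 *
 *     while (mirrors) {
 *         int m = raw_ctz(mirrors);
 *         ... handle the mirror with index 'm' ...
 *         mirrors = zero_rightmost_1bit(mirrors);
 *     }
 *
 * mirror_packet() additionally clears every bit already recorded in
 * ctx->mirrors at the bottom of the loop, so mirrors reached recursively
 * through output_normal() are not visited twice. */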
1936
1937 static void
1938 mirror_ingress_packet(struct xlate_ctx *ctx)
1939 {
1940 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
1941 struct xbundle *xbundle = lookup_input_bundle(
1942 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
1943 if (xbundle) {
1944 mirror_packet(ctx, xbundle,
1945 xbundle_mirror_src(ctx->xbridge, xbundle));
1946 }
1947 }
1948 }
1949
1950 /* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
1951 * If so, returns true. Otherwise, returns false.
1952 *
1953 * 'vid' should be the VID obtained from the 802.1Q header that was received as
1954 * part of a packet (specify 0 if there was no 802.1Q header), in the range
1955 * 0...4095. */
1956 static bool
1957 input_vid_is_valid(const struct xlate_ctx *ctx,
1958 uint16_t vid, struct xbundle *in_xbundle)
1959 {
1960 /* Allow any VID on the OFPP_NONE port. */
1961 if (in_xbundle == &ofpp_none_bundle) {
1962 return true;
1963 }
1964
1965 switch (in_xbundle->vlan_mode) {
1966 case PORT_VLAN_ACCESS:
1967 if (vid) {
1968 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
1969 "packet received on port %s configured as VLAN "
1970 "%d access port", vid, in_xbundle->name,
1971 in_xbundle->vlan);
1972 return false;
1973 }
1974 return true;
1975
1976 case PORT_VLAN_NATIVE_UNTAGGED:
1977 case PORT_VLAN_NATIVE_TAGGED:
1978 if (!vid) {
1979 /* Port must always carry its native VLAN. */
1980 return true;
1981 }
1982 /* Fall through. */
1983 case PORT_VLAN_TRUNK:
1984 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
1985 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
1986 "received on port %s not configured for "
1987 "trunking VLAN %"PRIu16,
1988 vid, in_xbundle->name, vid);
1989 return false;
1990 }
1991 return true;
1992
1993 case PORT_VLAN_DOT1Q_TUNNEL:
1994 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
1995 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
1996 "on dot1q-tunnel port %s that excludes this "
1997 "VLAN", vid, in_xbundle->name);
1998 return false;
1999 }
2000 return true;
2001
2002 default:
2003 OVS_NOT_REACHED();
2004 }
2005
2006 }
2007
2008 static void
2009 xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2010 {
2011 *dst = *src;
2012 }
2013
2014 static void
2015 xvlan_pop(struct xvlan *src)
2016 {
2017 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2018 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2019 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2020 }
2021
2022 static void
2023 xvlan_push_uninit(struct xvlan *src)
2024 {
2025 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2026 memset(&src->v[0], 0, sizeof(src->v[0]));
2027 }
2028
2029 /* Extract VLAN information (headers) from flow */
2030 static void
2031 xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2032 {
2033 int i;
2034 memset(xvlan, 0, sizeof(*xvlan));
2035 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2036 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2037 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2038 break;
2039 }
2040 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2041 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2042 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2043 }
2044 }
2045
2046 /* Put VLAN information (headers) into flow */
2047 static void
2048 xvlan_put(struct flow *flow, const struct xvlan *xvlan)
2049 {
2050 ovs_be16 tci;
2051 int i;
2052 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2053 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
2054 if (tci) {
2055 tci |= htons(VLAN_CFI);
2056 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2057 htons(xvlan->v[i].tpid) :
2058 htons(ETH_TYPE_VLAN_8021Q);
2059 }
2060 flow->vlans[i].tci = tci;
2061 }
2062 }
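
/* For example, the encoding above turns vid 10 with zero pcp bits into
 *
 *     tci = htons(10) | htons(VLAN_CFI)      (host value 0x100a)
 *
 * and, since that xvlan entry carries no tpid, the tag defaults to an
 * ordinary 802.1Q TPID (ETH_TYPE_VLAN_8021Q, 0x8100). */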
2063
2064 /* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2065 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2066 * returns the VLANs of the packet during bridge internal processing. */
2067 static void
2068 xvlan_input_translate(const struct xbundle *in_xbundle,
2069 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2070 {
2071
2072 switch (in_xbundle->vlan_mode) {
2073 case PORT_VLAN_ACCESS:
2074 memset(xvlan, 0, sizeof(*xvlan));
2075 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2076 ETH_TYPE_VLAN_8021Q;
2077 xvlan->v[0].vid = in_xbundle->vlan;
2078 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2079 break;
2080
2081 case PORT_VLAN_TRUNK:
2082 xvlan_copy(xvlan, in_xvlan);
2083 break;
2084
2085 case PORT_VLAN_NATIVE_UNTAGGED:
2086 case PORT_VLAN_NATIVE_TAGGED:
2087 xvlan_copy(xvlan, in_xvlan);
2088 if (!in_xvlan->v[0].vid) {
2089 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2090 ETH_TYPE_VLAN_8021Q;
2091 xvlan->v[0].vid = in_xbundle->vlan;
2092 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2093 }
2094 break;
2095
2096 case PORT_VLAN_DOT1Q_TUNNEL:
2097 xvlan_copy(xvlan, in_xvlan);
2098 xvlan_push_uninit(xvlan);
2099 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2100 xvlan->v[0].vid = in_xbundle->vlan;
2101 xvlan->v[0].pcp = 0;
2102 break;
2103
2104 default:
2105 OVS_NOT_REACHED();
2106 }
2107 }
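
/* A worked example of the dot1q-tunnel case above: a packet arriving with a
 * single customer tag, in_xvlan = {vid 10}, on a dot1q-tunnel bundle with
 * vlan 100 and (for example) qinq_ethtype 0x88a8 is translated to
 *
 *     xvlan = { {tpid 0x88a8, vid 100, pcp 0}, {vid 10, ...} }
 *
 * i.e. the service tag becomes the outermost header and the customer tag is
 * preserved underneath for the rest of the pipeline. */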
2108
2109 /* Given 'xvlan', the VLANs of a packet during internal processing, and
2110 * 'out_xbundle', a bundle on which the packet is to be output, returns the
2111 * VLANs that should be included in output packet. */
2112 static void
2113 xvlan_output_translate(const struct xbundle *out_xbundle,
2114 const struct xvlan *xvlan, struct xvlan *out_xvlan)
2115 {
2116 switch (out_xbundle->vlan_mode) {
2117 case PORT_VLAN_ACCESS:
2118 memset(out_xvlan, 0, sizeof(*out_xvlan));
2119 break;
2120
2121 case PORT_VLAN_TRUNK:
2122 case PORT_VLAN_NATIVE_TAGGED:
2123 xvlan_copy(out_xvlan, xvlan);
2124 break;
2125
2126 case PORT_VLAN_NATIVE_UNTAGGED:
2127 xvlan_copy(out_xvlan, xvlan);
2128 if (xvlan->v[0].vid == out_xbundle->vlan) {
2129 xvlan_pop(out_xvlan);
2130 }
2131 break;
2132
2133 case PORT_VLAN_DOT1Q_TUNNEL:
2134 xvlan_copy(out_xvlan, xvlan);
2135 xvlan_pop(out_xvlan);
2136 break;
2137
2138 default:
2139 OVS_NOT_REACHED();
2140 }
2141 }
2142
2143 /* If output xbundle is dot1q-tunnel, set mask bits of cvlan */
2144 static void
2145 check_and_set_cvlan_mask(struct flow_wildcards *wc,
2146 const struct xbundle *xbundle)
2147 {
2148 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2149 wc->masks.vlans[1].tci = htons(0xffff);
2150 }
2151 }
2152
2153 static void
2154 output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
2155 const struct xvlan *xvlan)
2156 {
2157 uint16_t vid;
2158 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
2159 struct xport *xport;
2160 struct xlate_bond_recirc xr;
2161 bool use_recirc = false;
2162 struct xvlan out_xvlan;
2163
2164 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2165
2166 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2167 if (out_xbundle->use_priority_tags) {
2168 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2169 VLAN_PCP_MASK;
2170 }
2171 vid = out_xvlan.v[0].vid;
2172 if (ovs_list_is_empty(&out_xbundle->xports)) {
2173 /* Partially configured bundle with no slaves. Drop the packet. */
2174 return;
2175 } else if (!out_xbundle->bond) {
2176 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
2177 bundle_node);
2178 } else {
2179 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2180 struct flow_wildcards *wc = ctx->wc;
2181 struct ofport_dpif *ofport;
2182
2183 if (ctx->xbridge->support.odp.recirc) {
2184 /* In case recirculation is not actually in use, 'xr.recirc_id'
2185 * will be set to '0', since a valid 'recirc_id' cannot
2186 * be zero. */
2187 bond_update_post_recirc_rules(out_xbundle->bond,
2188 &xr.recirc_id,
2189 &xr.hash_basis);
2190 if (xr.recirc_id) {
2191 /* Use recirculation instead of output. */
2192 use_recirc = true;
2193 xr.hash_alg = OVS_HASH_ALG_L4;
2194 /* Recirculation does not require unmasking hash fields. */
2195 wc = NULL;
2196 }
2197 }
2198
2199 ofport = bond_choose_output_slave(out_xbundle->bond,
2200 &ctx->xin->flow, wc, vid);
2201 xport = xport_lookup(xcfg, ofport);
2202
2203 if (!xport) {
2204 /* No slaves enabled, so drop packet. */
2205 return;
2206 }
2207
2208 /* If use_recirc is set, the main thread will handle stats
2209 * accounting for this bond. */
2210 if (!use_recirc) {
2211 if (ctx->xin->resubmit_stats) {
2212 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2213 ctx->xin->resubmit_stats->n_bytes);
2214 }
2215 if (ctx->xin->xcache) {
2216 struct xc_entry *entry;
2217 struct flow *flow;
2218
2219 flow = &ctx->xin->flow;
2220 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
2221 entry->bond.bond = bond_ref(out_xbundle->bond);
2222 entry->bond.flow = xmemdup(flow, sizeof *flow);
2223 entry->bond.vid = vid;
2224 }
2225 }
2226 }
2227
2228 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
2229 xvlan_put(&ctx->xin->flow, &out_xvlan);
2230
2231 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
2232 false, false);
2233 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
2234 }
2235
2236 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2237 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2238 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2239 static bool
2240 is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2241 {
2242 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2243 return false;
2244 }
2245
2246 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2247 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2248 return false;
2249 }
2250
2251 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2252 if (flow->nw_proto == ARP_OP_REPLY) {
2253 return true;
2254 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2255 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2256 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2257
2258 return flow->nw_src == flow->nw_dst;
2259 } else {
2260 return false;
2261 }
2262 }
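
/* For example, "who-has 10.0.0.5 tell 10.0.0.5" is a gratuitous ARP request:
 * the sender announces its own binding, so the source and target protocol
 * addresses are equal and the nw_src == nw_dst check above accepts it, while
 * an ordinary request for another host's address is not treated as
 * gratuitous. */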
2263
2264 /* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2265 * dropped. Returns true if they may be forwarded, false if they should be
2266 * dropped.
2267 *
2268 * 'in_port' must be the xport that corresponds to flow->in_port.
2269 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2270 *
2271 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2272 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
2273 * checked by input_vid_is_valid().
2274 *
2275 * May also add tags to '*tags', although the current implementation only does
2276 * so in one special case.
2277 */
2278 static bool
2279 is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2280 uint16_t vlan)
2281 {
2282 struct xbundle *in_xbundle = in_port->xbundle;
2283 const struct xbridge *xbridge = ctx->xbridge;
2284 struct flow *flow = &ctx->xin->flow;
2285
2286 /* Drop frames for reserved multicast addresses
2287 * only if forward_bpdu option is absent. */
2288 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2289 xlate_report(ctx, OFT_DETAIL,
2290 "packet has reserved destination MAC, dropping");
2291 return false;
2292 }
2293
2294 if (in_xbundle->bond) {
2295 struct mac_entry *mac;
2296
2297 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2298 flow->dl_dst)) {
2299 case BV_ACCEPT:
2300 break;
2301
2302 case BV_DROP:
2303 xlate_report(ctx, OFT_DETAIL,
2304 "bonding refused admissibility, dropping");
2305 return false;
2306
2307 case BV_DROP_IF_MOVED:
2308 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2309 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
2310 if (mac
2311 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
2312 && (!is_gratuitous_arp(flow, ctx->wc)
2313 || mac_entry_is_grat_arp_locked(mac))) {
2314 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2315 xlate_report(ctx, OFT_DETAIL,
2316 "SLB bond thinks this packet looped back, "
2317 "dropping");
2318 return false;
2319 }
2320 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2321 break;
2322 }
2323 }
2324
2325 return true;
2326 }
2327
2328 static bool
2329 update_learning_table__(const struct xbridge *xbridge,
2330 struct xbundle *in_xbundle, struct eth_addr dl_src,
2331 int vlan, bool is_grat_arp)
2332 {
2333 return (in_xbundle == &ofpp_none_bundle
2334 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2335 is_grat_arp,
2336 in_xbundle->bond != NULL,
2337 in_xbundle->ofbundle));
2338 }
2339
2340 static void
2341 update_learning_table(const struct xlate_ctx *ctx,
2342 struct xbundle *in_xbundle, struct eth_addr dl_src,
2343 int vlan, bool is_grat_arp)
2344 {
2345 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2346 is_grat_arp)) {
2347 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2348 "on port %s in VLAN %d",
2349 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
2350 }
2351 }
2352
2353 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2354 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2355 static void
2356 update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
2357 const struct flow *flow,
2358 struct mcast_snooping *ms, int vlan,
2359 struct xbundle *in_xbundle,
2360 const struct dp_packet *packet)
2361 OVS_REQ_WRLOCK(ms->rwlock)
2362 {
2363 const struct igmp_header *igmp;
2364 int count;
2365 size_t offset;
2366 ovs_be32 ip4 = flow->igmp_group_ip4;
2367
2368 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2369 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2370 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2371 xlate_report_debug(ctx, OFT_DETAIL,
2372 "multicast snooping received bad IGMP "
2373 "checksum on port %s in VLAN %d",
2374 in_xbundle->name, vlan);
2375 return;
2376 }
2377
2378 switch (ntohs(flow->tp_src)) {
2379 case IGMP_HOST_MEMBERSHIP_REPORT:
2380 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2381 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2382 xlate_report_debug(ctx, OFT_DETAIL,
2383 "multicast snooping learned that "
2384 IP_FMT" is on port %s in VLAN %d",
2385 IP_ARGS(ip4), in_xbundle->name, vlan);
2386 }
2387 break;
2388 case IGMP_HOST_LEAVE_MESSAGE:
2389 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2390 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
2391 IP_FMT" is on port %s in VLAN %d",
2392 IP_ARGS(ip4), in_xbundle->name, vlan);
2393 }
2394 break;
2395 case IGMP_HOST_MEMBERSHIP_QUERY:
2396 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2397 in_xbundle->ofbundle)) {
2398 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2399 "from "IP_FMT" is on port %s in VLAN %d",
2400 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
2401 }
2402 break;
2403 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2404 count = mcast_snooping_add_report(ms, packet, vlan,
2405 in_xbundle->ofbundle);
2406 if (count) {
2407 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2408 "%d addresses on port %s in VLAN %d",
2409 count, in_xbundle->name, vlan);
2410 }
2411 break;
2412 }
2413 }
2414
2415 static void
2416 update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
2417 const struct flow *flow,
2418 struct mcast_snooping *ms, int vlan,
2419 struct xbundle *in_xbundle,
2420 const struct dp_packet *packet)
2421 OVS_REQ_WRLOCK(ms->rwlock)
2422 {
2423 const struct mld_header *mld;
2424 int count;
2425 size_t offset;
2426
2427 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2428 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2429
2430 if (!mld ||
2431 packet_csum_upperlayer6(dp_packet_l3(packet),
2432 mld, IPPROTO_ICMPV6,
2433 dp_packet_l4_size(packet)) != 0) {
2434 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2435 "bad MLD checksum on port %s in VLAN %d",
2436 in_xbundle->name, vlan);
2437 return;
2438 }
2439
2440 switch (ntohs(flow->tp_src)) {
2441 case MLD_QUERY:
2442 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2443 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2444 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2445 "port %s in VLAN %d", in_xbundle->name, vlan);
2446 }
2447 break;
2448 case MLD_REPORT:
2449 case MLD_DONE:
2450 case MLD2_REPORT:
2451 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2452 if (count) {
2453 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2454 "%d addresses on port %s in VLAN %d",
2455 count, in_xbundle->name, vlan);
2456 }
2457 break;
2458 }
2459 }
2460
2461 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2462 * was received on 'in_xbundle' in 'vlan'. */
2463 static void
2464 update_mcast_snooping_table(const struct xlate_ctx *ctx,
2465 const struct flow *flow, int vlan,
2466 struct xbundle *in_xbundle,
2467 const struct dp_packet *packet)
2468 {
2469 struct mcast_snooping *ms = ctx->xbridge->ms;
2470 struct xlate_cfg *xcfg;
2471 struct xbundle *mcast_xbundle;
2472 struct mcast_port_bundle *fport;
2473
2474 /* Don't learn the OFPP_NONE port. */
2475 if (in_xbundle == &ofpp_none_bundle) {
2476 return;
2477 }
2478
2479 /* Don't learn from flood ports */
2480 mcast_xbundle = NULL;
2481 ovs_rwlock_wrlock(&ms->rwlock);
2482 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2483 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2484 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2485 if (mcast_xbundle == in_xbundle) {
2486 break;
2487 }
2488 }
2489
2490 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
2491 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2492 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
2493 in_xbundle, packet);
2494 } else {
2495 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
2496 in_xbundle, packet);
2497 }
2498 }
2499 ovs_rwlock_unlock(&ms->rwlock);
2500 }
2501
2502 /* Sends the packet to ports that have learned the multicast group. */
2503 static void
2504 xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2505 struct mcast_snooping *ms OVS_UNUSED,
2506 struct mcast_group *grp,
2507 struct xbundle *in_xbundle,
2508 const struct xvlan *xvlan)
2509 OVS_REQ_RDLOCK(ms->rwlock)
2510 {
2511 struct xlate_cfg *xcfg;
2512 struct mcast_group_bundle *b;
2513 struct xbundle *mcast_xbundle;
2514
2515 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2516 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2517 mcast_xbundle = xbundle_lookup(xcfg, b->port);
2518 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2519 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
2520 output_normal(ctx, mcast_xbundle, xvlan);
2521 } else if (!mcast_xbundle) {
2522 xlate_report(ctx, OFT_WARN,
2523 "mcast group port is unknown, dropping");
2524 } else {
2525 xlate_report(ctx, OFT_DETAIL,
2526 "mcast group port is input port, dropping");
2527 }
2528 }
2529 }
2530
2531 /* send the packet to ports connected to multicast routers */
2532 static void
2533 xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2534 struct mcast_snooping *ms,
2535 struct xbundle *in_xbundle,
2536 const struct xvlan *xvlan)
2537 OVS_REQ_RDLOCK(ms->rwlock)
2538 {
2539 struct xlate_cfg *xcfg;
2540 struct mcast_mrouter_bundle *mrouter;
2541 struct xbundle *mcast_xbundle;
2542
2543 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2544 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2545 mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
2546 if (mcast_xbundle && mcast_xbundle != in_xbundle
2547 && mrouter->vlan == xvlan->v[0].vid) {
2548 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
2549 output_normal(ctx, mcast_xbundle, xvlan);
2550 } else if (!mcast_xbundle) {
2551 xlate_report(ctx, OFT_WARN,
2552 "mcast router port is unknown, dropping");
2553 } else if (mrouter->vlan != xvlan->v[0].vid) {
2554 xlate_report(ctx, OFT_DETAIL,
2555 "mcast router is on another vlan, dropping");
2556 } else {
2557 xlate_report(ctx, OFT_DETAIL,
2558 "mcast router port is input port, dropping");
2559 }
2560 }
2561 }
2562
2563 /* send the packet to ports flagged to be flooded */
2564 static void
2565 xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2566 struct mcast_snooping *ms,
2567 struct xbundle *in_xbundle,
2568 const struct xvlan *xvlan)
2569 OVS_REQ_RDLOCK(ms->rwlock)
2570 {
2571 struct xlate_cfg *xcfg;
2572 struct mcast_port_bundle *fport;
2573 struct xbundle *mcast_xbundle;
2574
2575 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2576 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2577 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2578 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2579 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
2580 output_normal(ctx, mcast_xbundle, xvlan);
2581 } else if (!mcast_xbundle) {
2582 xlate_report(ctx, OFT_WARN,
2583 "mcast flood port is unknown, dropping");
2584 } else {
2585 xlate_report(ctx, OFT_DETAIL,
2586 "mcast flood port is input port, dropping");
2587 }
2588 }
2589 }
2590
2591 /* forward the Reports to configured ports */
2592 static void
2593 xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2594 struct mcast_snooping *ms,
2595 struct xbundle *in_xbundle,
2596 const struct xvlan *xvlan)
2597 OVS_REQ_RDLOCK(ms->rwlock)
2598 {
2599 struct xlate_cfg *xcfg;
2600 struct mcast_port_bundle *rport;
2601 struct xbundle *mcast_xbundle;
2602
2603 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2604 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2605 mcast_xbundle = xbundle_lookup(xcfg, rport->port);
2606 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2607 xlate_report(ctx, OFT_DETAIL,
2608 "forwarding report to mcast flagged port");
2609 output_normal(ctx, mcast_xbundle, xvlan);
2610 } else if (!mcast_xbundle) {
2611 xlate_report(ctx, OFT_WARN,
2612 "mcast port is unknown, dropping the report");
2613 } else {
2614 xlate_report(ctx, OFT_DETAIL,
2615 "mcast port is input port, dropping the Report");
2616 }
2617 }
2618 }
2619
2620 static void
2621 xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2622 struct xvlan *xvlan)
2623 {
2624 struct xbundle *xbundle;
2625
2626 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2627 if (xbundle != in_xbundle
2628 && xbundle_includes_vlan(xbundle, xvlan)
2629 && xbundle->floodable
2630 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2631 output_normal(ctx, xbundle, xvlan);
2632 }
2633 }
2634 ctx->nf_output_iface = NF_OUT_FLOOD;
2635 }
2636
2637 static bool
2638 is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2639 {
2640 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2641 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2642 return ip_is_local_multicast(flow->nw_dst);
2643 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2644 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2645 return ipv6_is_all_hosts(&flow->ipv6_dst);
2646 } else {
2647 return false;
2648 }
2649 }
2650
2651 static void
2652 xlate_normal(struct xlate_ctx *ctx)
2653 {
2654 struct flow_wildcards *wc = ctx->wc;
2655 struct flow *flow = &ctx->xin->flow;
2656 struct xbundle *in_xbundle;
2657 struct xport *in_port;
2658 struct mac_entry *mac;
2659 void *mac_port;
2660 struct xvlan in_xvlan;
2661 struct xvlan xvlan;
2662 uint16_t vlan;
2663
2664 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2665 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2666 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
2667
2668 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
2669 if (!in_xbundle) {
2670 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
2671 return;
2672 }
2673
2674 /* Drop malformed frames. */
2675 if (eth_type_vlan(flow->dl_type) &&
2676 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
2677 if (ctx->xin->packet != NULL) {
2678 xlate_report_error(ctx, "dropping packet with partial "
2679 "VLAN tag received on port %s",
2680 in_xbundle->name);
2681 }
2682 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
2683 return;
2684 }
2685
2686 /* Drop frames on bundles reserved for mirroring. */
2687 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
2688 if (ctx->xin->packet != NULL) {
2689 xlate_report_error(ctx, "dropping packet received on port %s, "
2690 "which is reserved exclusively for mirroring",
2691 in_xbundle->name);
2692 }
2693 xlate_report(ctx, OFT_WARN,
2694 "input port is mirror output port, dropping");
2695 return;
2696 }
2697
2698 /* Check VLAN. */
2699 xvlan_extract(flow, &in_xvlan);
2700 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
2701 xlate_report(ctx, OFT_WARN,
2702 "disallowed VLAN VID for this input port, dropping");
2703 return;
2704 }
2705 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
2706 vlan = xvlan.v[0].vid;
2707
2708 /* Check other admissibility requirements. */
2709 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2710 return;
2711 }
2712
2713 /* Learn source MAC. */
2714 bool is_grat_arp = is_gratuitous_arp(flow, wc);
2715 if (ctx->xin->allow_side_effects
2716 && flow->packet_type == htonl(PT_ETH)
2717 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
2718 ) {
2719 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
2720 is_grat_arp);
2721 }
2722 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
2723 struct xc_entry *entry;
2724
2725 /* Save just enough info to update mac learning table later. */
2726 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
2727 entry->normal.ofproto = ctx->xbridge->ofproto;
2728 entry->normal.in_port = flow->in_port.ofp_port;
2729 entry->normal.dl_src = flow->dl_src;
2730 entry->normal.vlan = vlan;
2731 entry->normal.is_gratuitous_arp = is_grat_arp;
2732 }
2733
2734 /* Determine output bundle. */
2735 if (mcast_snooping_enabled(ctx->xbridge->ms)
2736 && !eth_addr_is_broadcast(flow->dl_dst)
2737 && eth_addr_is_multicast(flow->dl_dst)
2738 && is_ip_any(flow)) {
2739 struct mcast_snooping *ms = ctx->xbridge->ms;
2740 struct mcast_group *grp = NULL;
2741
2742 if (is_igmp(flow, wc)) {
2743             /*
2744              * IGMP packets need to take the slow path, in order to be
2745              * processed for mdb updates.  That prevents group expirations
2746              * from firing even after hosts have sent reports.
2747              */
2748 ctx->xout->slow |= SLOW_ACTION;
2749
2750 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
2751 if (mcast_snooping_is_membership(flow->tp_src) ||
2752 mcast_snooping_is_query(flow->tp_src)) {
2753 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2754 update_mcast_snooping_table(ctx, flow, vlan,
2755 in_xbundle, ctx->xin->packet);
2756 }
2757 }
2758
2759 if (mcast_snooping_is_membership(flow->tp_src)) {
2760 ovs_rwlock_rdlock(&ms->rwlock);
2761 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2762 /* RFC4541: section 2.1.1, item 1: A snooping switch should
2763 * forward IGMP Membership Reports only to those ports where
2764 * multicast routers are attached. Alternatively stated: a
2765 * snooping switch should not forward IGMP Membership Reports
2766 * to ports on which only hosts are attached.
2767 * An administrative control may be provided to override this
2768 * restriction, allowing the report messages to be flooded to
2769 * other ports. */
2770 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
2771 ovs_rwlock_unlock(&ms->rwlock);
2772 } else {
2773 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
2774 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2775 }
2776 return;
2777 } else if (is_mld(flow, wc)) {
2778 ctx->xout->slow |= SLOW_ACTION;
2779 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2780 update_mcast_snooping_table(ctx, flow, vlan,
2781 in_xbundle, ctx->xin->packet);
2782 }
2783 if (is_mld_report(flow, wc)) {
2784 ovs_rwlock_rdlock(&ms->rwlock);
2785 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2786 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
2787 ovs_rwlock_unlock(&ms->rwlock);
2788 } else {
2789 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
2790 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2791 }
2792 } else {
2793 if (is_ip_local_multicast(flow, wc)) {
2794 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
2795 * address in the 224.0.0.x range which are not IGMP must
2796 * be forwarded on all ports */
2797 xlate_report(ctx, OFT_DETAIL,
2798 "RFC4541: section 2.1.2, item 2, flooding");
2799 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2800 return;
2801 }
2802 }
2803
2804 /* forwarding to group base ports */
2805 ovs_rwlock_rdlock(&ms->rwlock);
2806 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2807 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
2808 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2809 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
2810 }
2811 if (grp) {
2812 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &xvlan);
2813 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
2814 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2815 } else {
2816 if (mcast_snooping_flood_unreg(ms)) {
2817 xlate_report(ctx, OFT_DETAIL,
2818 "unregistered multicast, flooding");
2819 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2820 } else {
2821 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2822 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
2823 }
2824 }
2825 ovs_rwlock_unlock(&ms->rwlock);
2826 } else {
2827 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
2828 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
2829 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
2830 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
2831
2832 if (mac_port) {
2833 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2834 struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
2835 if (mac_xbundle && mac_xbundle != in_xbundle) {
2836 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
2837 output_normal(ctx, mac_xbundle, &xvlan);
2838 } else if (!mac_xbundle) {
2839 xlate_report(ctx, OFT_WARN,
2840 "learned port is unknown, dropping");
2841 } else {
2842 xlate_report(ctx, OFT_DETAIL,
2843 "learned port is input port, dropping");
2844 }
2845 } else {
2846 xlate_report(ctx, OFT_DETAIL,
2847 "no learned MAC for destination, flooding");
2848 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2849 }
2850 }
2851 }
2852
2853 /* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
2854 * 'probability' is the number of packets out of UINT32_MAX to sample. The
2855 * 'cookie' is passed back in the callback for each sampled packet.
2856 * 'tunnel_out_port', if not ODPP_NONE, is added as the
2857 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions',
2858 * an OVS_USERSPACE_ATTR_ACTIONS attribute is added. If
2859 * 'emit_set_tunnel', sample(sampling_port=1) would translate into
2860 * datapath sample action set(tunnel(...)), sample(...) and it is used
2861 * for sampling egress tunnel information.
2862 */
2863 static size_t
2864 compose_sample_action(struct xlate_ctx *ctx,
2865 const uint32_t probability,
2866 const struct user_action_cookie *cookie,
2867 const odp_port_t tunnel_out_port,
2868 bool include_actions)
2869 {
2870 if (probability == 0) {
2871 /* No need to generate sampling or the inner action. */
2872 return 0;
2873 }
2874
2875 /* If the slow path meter is configured by the controller,
2876 * insert a meter action before the user space action. */
2877 struct ofproto *ofproto = &ctx->xin->ofproto->up;
2878 uint32_t meter_id = ofproto->slowpath_meter_id;
2879
2880     /* When a meter action is not required, avoid generating a sample action
2881      * for a 100% sampling rate. */
2882 bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
2883 size_t sample_offset, actions_offset;
2884 if (is_sample) {
2885 sample_offset = nl_msg_start_nested(ctx->odp_actions,
2886 OVS_ACTION_ATTR_SAMPLE);
2887 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
2888 probability);
2889 actions_offset = nl_msg_start_nested(ctx->odp_actions,
2890 OVS_SAMPLE_ATTR_ACTIONS);
2891 }
2892
2893 if (meter_id != UINT32_MAX) {
2894 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
2895 }
2896
2897 odp_port_t odp_port = ofp_port_to_odp_port(
2898 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
2899 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
2900 flow_hash_5tuple(&ctx->xin->flow, 0));
2901 int cookie_offset = odp_put_userspace_action(pid, cookie, sizeof *cookie,
2902 tunnel_out_port,
2903 include_actions,
2904 ctx->odp_actions);
2905
2906 if (is_sample) {
2907 nl_msg_end_nested(ctx->odp_actions, actions_offset);
2908 nl_msg_end_nested(ctx->odp_actions, sample_offset);
2909 }
2910
2911 return cookie_offset;
2912 }
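
/* Schematically (a sketch, not exact odp-util syntax), with a sampling rate
 * below 100% and a slow-path meter configured, the code above emits a nested
 * datapath action along the lines of
 *
 *     sample(sample=<probability>,
 *            actions(meter(<slowpath meter id>),
 *                    userspace(pid=<pid>,<cookie>,...)))
 *
 * whereas with 100% sampling and no meter the sample() wrapper is skipped and
 * the userspace() action is emitted directly. */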
2913
2914 /* If sFlow is not enabled, returns 0 without doing anything.
2915 *
2916 * If sFlow is enabled, appends a template "sample" action to the ODP actions
2917 * in 'ctx'. This action is a template because some of the information needed
2918 * to fill it out is not available until flow translation is complete. In this
2919  * case, this function returns an offset, which is always nonzero, to pass
2920 * later to fix_sflow_action() to fill in the rest of the template. */
2921 static size_t
2922 compose_sflow_action(struct xlate_ctx *ctx)
2923 {
2924 struct dpif_sflow *sflow = ctx->xbridge->sflow;
2925 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
2926 return 0;
2927 }
2928
2929 struct user_action_cookie cookie = {
2930 .type = USER_ACTION_COOKIE_SFLOW,
2931 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
2932 .ofproto_uuid = ctx->xbridge->ofproto->uuid
2933 };
2934 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
2935 &cookie, ODPP_NONE, true);
2936 }
2937
2938 /* If flow IPFIX is enabled, this makes sure that the IPFIX flow sample
2939  * action at the egress point of a tunnel port is emitted just in front of
2940  * the corresponding output action.  If bridge IPFIX is enabled, this
2941  * appends an IPFIX sample action to 'ctx->odp_actions'. */
2942 static void
2943 compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
2944 {
2945 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
2946 odp_port_t tunnel_out_port = ODPP_NONE;
2947
2948 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
2949 return;
2950 }
2951
2952 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
2953 * number. */
2954 if (output_odp_port == ODPP_NONE &&
2955 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
2956 return;
2957 }
2958
2959 /* For output case, output_odp_port is valid. */
2960 if (output_odp_port != ODPP_NONE) {
2961 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
2962 return;
2963 }
2964         /* If tunnel sampling is enabled, put an additional option attribute:
2965          * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT
2966          */
2967 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
2968 dpif_ipfix_is_tunnel_port(ipfix, output_odp_port) ) {
2969 tunnel_out_port = output_odp_port;
2970 }
2971 }
2972
2973 struct user_action_cookie cookie = {
2974 .type = USER_ACTION_COOKIE_IPFIX,
2975 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
2976 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
2977 .ipfix.output_odp_port = output_odp_port
2978 };
2979 compose_sample_action(ctx,
2980 dpif_ipfix_get_bridge_exporter_probability(ipfix),
2981 &cookie, tunnel_out_port, false);
2982 }
2983
2984 /* Fix "sample" action according to data collected while composing ODP actions,
2985 * as described in compose_sflow_action().
2986 *
2987 * 'user_cookie_offset' must be the offset returned by
2988 * compose_sflow_action(). */
2989 static void
2990 fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
2991 {
2992 const struct flow *base = &ctx->base_flow;
2993 struct user_action_cookie *cookie;
2994
2995 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
2996 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
2997
2998 cookie->sflow.vlan_tci = base->vlans[0].tci;
2999
3000 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
3001 * port information") for the interpretation of cookie->output. */
3002 switch (ctx->sflow_n_outputs) {
3003 case 0:
3004 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3005 cookie->sflow.output = 0x40000000 | 256;
3006 break;
3007
3008 case 1:
3009 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3010 ctx->xbridge->sflow, ctx->sflow_odp_port);
3011 if (cookie->sflow.output) {
3012 break;
3013 }
3014 /* Fall through. */
3015 default:
3016         /* 0x80000000 means "multiple output ports". */
3017 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3018 break;
3019 }
3020 }
3021
3022 static bool
3023 process_special(struct xlate_ctx *ctx, const struct xport *xport)
3024 {
3025 const struct flow *flow = &ctx->xin->flow;
3026 struct flow_wildcards *wc = ctx->wc;
3027 const struct xbridge *xbridge = ctx->xbridge;
3028 const struct dp_packet *packet = ctx->xin->packet;
3029 enum slow_path_reason slow;
3030
3031 if (!xport) {
3032 slow = 0;
3033 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
3034 if (packet) {
3035 cfm_process_heartbeat(xport->cfm, packet);
3036 }
3037 slow = SLOW_CFM;
3038 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
3039 if (packet) {
3040 bfd_process_packet(xport->bfd, flow, packet);
3041 /* If POLL received, immediately sends FINAL back. */
3042 if (bfd_should_send_packet(xport->bfd)) {
3043 ofproto_dpif_monitor_port_send_soon(xport->ofport);
3044 }
3045 }
3046 slow = SLOW_BFD;
3047 } else if (xport->xbundle && xport->xbundle->lacp
3048 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3049 if (packet) {
3050 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
3051 }
3052 slow = SLOW_LACP;
3053 } else if ((xbridge->stp || xbridge->rstp) &&
3054 stp_should_process_flow(flow, wc)) {
3055 if (packet) {
3056 xbridge->stp
3057 ? stp_process_packet(xport, packet)
3058 : rstp_process_packet(xport, packet);
3059 }
3060 slow = SLOW_STP;
3061 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
3062 if (packet) {
3063 lldp_process_packet(xport->lldp, packet);
3064 }
3065 slow = SLOW_LLDP;
3066 } else {
3067 slow = 0;
3068 }
3069
3070 if (slow) {
3071 ctx->xout->slow |= slow;
3072 return true;
3073 } else {
3074 return false;
3075 }
3076 }
3077
3078 static int
3079 tnl_route_lookup_flow(const struct flow *oflow,
3080 struct in6_addr *ip, struct in6_addr *src,
3081 struct xport **out_port)
3082 {
3083 char out_dev[IFNAMSIZ];
3084 struct xbridge *xbridge;
3085 struct xlate_cfg *xcfg;
3086 struct in6_addr gw;
3087 struct in6_addr dst;
3088
3089 dst = flow_tnl_dst(&oflow->tunnel);
3090 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
3091 return -ENOENT;
3092 }
3093
3094 if (ipv6_addr_is_set(&gw) &&
3095 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
3096 *ip = gw;
3097 } else {
3098 *ip = dst;
3099 }
3100
3101 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
3102 ovs_assert(xcfg);
3103
3104 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
3105 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3106 struct xport *port;
3107
3108 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3109 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3110 *out_port = port;
3111 return 0;
3112 }
3113 }
3114 }
3115 }
3116 return -ENOENT;
3117 }
3118
3119 static int
3120 compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3121 struct dp_packet *packet)
3122 {
3123 struct xbridge *xbridge = out_dev->xbridge;
3124 struct ofpact_output output;
3125 struct flow flow;
3126
3127 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
3128 flow_extract(packet, &flow);
3129 flow.in_port.ofp_port = out_dev->ofp_port;
3130 output.port = OFPP_TABLE;
3131 output.max_len = 0;
3132
3133 return ofproto_dpif_execute_actions__(xbridge->ofproto,
3134 ctx->xin->tables_version, &flow,
3135 NULL, &output.ofpact, sizeof output,
3136 ctx->depth, ctx->resubmits, packet);
3137 }
3138
3139 static void
3140 tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3141 const struct eth_addr eth_src,
3142 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3143 {
3144 struct dp_packet packet;
3145
3146 dp_packet_init(&packet, 0);
3147 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
3148 compose_table_xlate(ctx, out_dev, &packet);
3149 dp_packet_uninit(&packet);
3150 }
3151
3152 static void
3153 tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3154 const struct eth_addr eth_src,
3155 ovs_be32 ip_src, ovs_be32 ip_dst)
3156 {
3157 struct dp_packet packet;
3158
3159 dp_packet_init(&packet, 0);
3160 compose_arp(&packet, ARP_OP_REQUEST,
3161 eth_src, eth_addr_zero, true, ip_src, ip_dst);
3162
3163 compose_table_xlate(ctx, out_dev, &packet);
3164 dp_packet_uninit(&packet);
3165 }
3166
3167 static void
3168 propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3169 const struct flow *src_flow,
3170 struct eth_addr dmac, struct eth_addr smac,
3171 struct in6_addr s_ip6, ovs_be32 s_ip,
3172 bool is_tnl_ipv6, uint8_t nw_proto)
3173 {
3174 dst_flow->dl_dst = dmac;
3175 dst_flow->dl_src = smac;
3176
3177 dst_flow->packet_type = htonl(PT_ETH);
3178 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3179 dst_flow->nw_src = src_flow->tunnel.ip_src;
3180 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3181 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3182
3183 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3184 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3185 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3186 dst_flow->tp_src = src_flow->tunnel.tp_src;
3187
3188 if (is_tnl_ipv6) {
3189 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3190 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3191 && !ipv6_mask_is_any(&s_ip6)) {
3192 dst_flow->ipv6_src = s_ip6;
3193 }
3194 } else {
3195 dst_flow->dl_type = htons(ETH_TYPE_IP);
3196 if (dst_flow->nw_src == 0 && s_ip) {
3197 dst_flow->nw_src = s_ip;
3198 }
3199 }
3200 dst_flow->nw_proto = nw_proto;
3201 }
3202
3203 /*
3204 * Populate the 'flow' and 'base_flow' L3 fields to do the post tunnel push
3205 * translations.
3206 */
3207 static void
3208 propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3209 struct eth_addr smac, struct in6_addr s_ip6,
3210 ovs_be32 s_ip, bool is_tnl_ipv6,
3211 enum ovs_vport_type tnl_type)
3212 {
3213 struct flow *base_flow, *flow;
3214 flow = &ctx->xin->flow;
3215 base_flow = &ctx->base_flow;
3216 uint8_t nw_proto = 0;
3217
3218 switch (tnl_type) {
3219 case OVS_VPORT_TYPE_GRE:
3220 nw_proto = IPPROTO_GRE;
3221 break;
3222 case OVS_VPORT_TYPE_VXLAN:
3223 case OVS_VPORT_TYPE_GENEVE:
3224 nw_proto = IPPROTO_UDP;
3225 break;
3226 case OVS_VPORT_TYPE_LISP:
3227 case OVS_VPORT_TYPE_STT:
3228 case OVS_VPORT_TYPE_UNSPEC:
3229 case OVS_VPORT_TYPE_NETDEV:
3230 case OVS_VPORT_TYPE_INTERNAL:
3231 case __OVS_VPORT_TYPE_MAX:
3232 default:
3233 OVS_NOT_REACHED();
3234 }
3235     /*
3236      * Update 'base_flow' before 'flow', since 'flow' is the source for both
3237      * calls and the helper modifies its 'dst_flow' argument.
3238      */
3239 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3240 is_tnl_ipv6, nw_proto);
3241 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3242 is_tnl_ipv6, nw_proto);
3243 }
3244
3245 static int
3246 native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
3247 const struct flow *flow, odp_port_t tunnel_odp_port,
3248 bool truncate)
3249 {
3250 struct netdev_tnl_build_header_params tnl_params;
3251 struct ovs_action_push_tnl tnl_push_data;
3252 struct xport *out_dev = NULL;
3253 ovs_be32 s_ip = 0, d_ip = 0;
3254 struct in6_addr s_ip6 = in6addr_any;
3255 struct in6_addr d_ip6 = in6addr_any;
3256 struct eth_addr smac;
3257 struct eth_addr dmac;
3258 int err;
3259 char buf_sip6[INET6_ADDRSTRLEN];
3260 char buf_dip6[INET6_ADDRSTRLEN];
3261
3262 /* Structures to backup Ethernet and IP of base_flow. */
3263 struct flow old_base_flow;
3264 struct flow old_flow;
3265
3266 /* Backup flow & base_flow data. */
3267 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3268 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3269
3270 err = tnl_route_lookup_flow(flow, &d_ip6, &s_ip6, &out_dev);
3271 if (err) {
3272 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
3273 return err;
3274 }
3275
3276 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
3277 ipv6_string_mapped(buf_dip6, &d_ip6),
3278 netdev_get_name(out_dev->netdev));
3279
3280 /* Use mac addr of bridge port of the peer. */
3281 err = netdev_get_etheraddr(out_dev->netdev, &smac);
3282 if (err) {
3283 xlate_report(ctx, OFT_WARN,
3284 "tunnel output device lacks Ethernet address");
3285 return err;
3286 }
3287
3288 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3289 if (d_ip) {
3290 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
3291 }
3292
3293 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
3294 if (err) {
3295 xlate_report(ctx, OFT_DETAIL,
3296 "neighbor cache miss for %s on bridge %s, "
3297 "sending %s request",
3298 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3299 if (d_ip) {
3300 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3301 } else {
3302 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3303 }
3304 return err;
3305 }
3306
3307 if (ctx->xin->xcache) {
3308 struct xc_entry *entry;
3309
3310 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
3311 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3312 sizeof entry->tnl_neigh_cache.br_name);
3313 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
3314 }
3315
3316 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
3317 " to "ETH_ADDR_FMT" %s",
3318 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3319 ETH_ADDR_ARGS(dmac), buf_dip6);
3320
3321 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3322 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
3323 if (err) {
3324 return err;
3325 }
3326 tnl_push_data.tnl_port = tunnel_odp_port;
3327 tnl_push_data.out_port = out_dev->odp_port;
3328
3329     /* After the tunnel header has been added, the MAC and IP data of 'flow'
3330      * and 'base_flow' need to be set properly, since there is no
3331      * recirculation anymore when sending the packet to the tunnel. */
3332
3333 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
3334 s_ip, tnl_params.is_ipv6,
3335 tnl_push_data.tnl_type);
3336
3337 size_t clone_ofs = 0;
3338 size_t push_action_size;
3339
3340 clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
3341 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3342 push_action_size = ctx->odp_actions->size;
3343
3344 if (!truncate) {
3345 const struct dpif_flow_stats *backup_resubmit_stats;
3346 struct xlate_cache *backup_xcache;
3347 struct flow_wildcards *backup_wc, wc;
3348 bool backup_side_effects;
3349 const struct dp_packet *backup_packet;
3350
3351 memset(&wc, 0 , sizeof wc);
3352 backup_wc = ctx->wc;
3353 ctx->wc = &wc;
3354 ctx->xin->wc = NULL;
3355 backup_resubmit_stats = ctx->xin->resubmit_stats;
3356 backup_xcache = ctx->xin->xcache;
3357 backup_side_effects = ctx->xin->allow_side_effects;
3358 backup_packet = ctx->xin->packet;
3359
3360 ctx->xin->resubmit_stats = NULL;
3361 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3362 ctx->xin->allow_side_effects = false;
3363 ctx->xin->packet = NULL;
3364
3365 /* Push the cache entry for the tunnel first. */
3366 struct xc_entry *entry;
3367 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3368 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3369 entry->tunnel_hdr.operation = ADD;
3370
3371 patch_port_output(ctx, xport, out_dev);
3372
3373         /* Similar to the stats update in revalidation, the xcache entries
3374          * populated by the previous translation are used to update the
3375          * stats correctly.
3376          */
3377 if (backup_resubmit_stats) {
3378 struct dpif_flow_stats stats = *backup_resubmit_stats;
3379 xlate_push_stats(ctx->xin->xcache, &stats);
3380 }
3381 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3382
3383 if (ctx->odp_actions->size > push_action_size) {
3384 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
3385 } else {
3386 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3387             /* XXX: There is no real use case for a tunnel push without
3388              * any post actions.  However, keeping it as is for now to keep
3389              * 'make check' happy.  This should be removed once all the
3390              * 'make check' tunnel test cases do something meaningful with
3391              * tunnel-encapsulated packets.
3392              */
3393 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3394 }
3395
3396 /* Restore context status. */
3397 ctx->xin->resubmit_stats = backup_resubmit_stats;
3398 xlate_cache_delete(ctx->xin->xcache);
3399 ctx->xin->xcache = backup_xcache;
3400 ctx->xin->allow_side_effects = backup_side_effects;
3401 ctx->xin->packet = backup_packet;
3402 ctx->wc = backup_wc;
3403 } else {
3404 /* In order to maintain accurate stats, use recirc for
3405          * native tunneling. */
3406 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3407 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3408 }
3409
3410 /* Restore the flows after the translation. */
3411 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3412 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
3413 return 0;
3414 }
3415
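/* Commits to 'ctx->odp_actions' the datapath actions needed to bring the
 * packet (described by 'ctx->base_flow') in line with the modifications
 * accumulated in 'ctx->xin->flow', including any pending encapsulation, and
 * releases the temporary encapsulation data. */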
3416 static void
3417 xlate_commit_actions(struct xlate_ctx *ctx)
3418 {
3419 bool use_masked = ctx->xbridge->support.masked_set_action;
3420
3421 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3422 ctx->odp_actions, ctx->wc,
3423 use_masked, ctx->pending_encap,
3424 ctx->encap_data);
3425 ctx->pending_encap = false;
3426 ofpbuf_delete(ctx->encap_data);
3427 ctx->encap_data = NULL;
3428 }
3429
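/* Forgets the connection tracking state of the current packet, both in the
 * translation context and in the flow itself. */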
3430 static void
3431 clear_conntrack(struct xlate_ctx *ctx)
3432 {
3433 ctx->conntracked = false;
3434 flow_clear_conntrack(&ctx->xin->flow);
3435 }
3436
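/* Returns true if 'flow' arrived on a protected bundle and 'xport_out' also
 * belongs to a protected bundle, in which case forwarding between the two
 * ports must be suppressed. */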
3437 static bool
3438 xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3439 {
3440 const struct xport *xport_in;
3441
3442 if (!xport_out) {
3443 return false;
3444 }
3445
3446 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3447
3448 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3449 xport_in->xbundle->protected && xport_out->xbundle->protected);
3450 }
3451
3452 /* Handles the case where a packet is sent from one bridge to another bridge.
3453 *
3454 * The bridges are internally connected, either with patch ports or with
3455 * tunnel ports.
3456 *
3457 * The output action to another bridge causes translation to continue within
3458 * the next bridge. This process can be recursive; the next bridge can
3459  * output to yet another bridge.
3460 *
3461 * The translated actions from the second bridge onwards are enclosed within
3462 * the clone action, so that any modification to the packet will not be visible
3463 * to the remaining actions of the originating bridge.
3464 */
3465 static void
3466 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
3467 struct xport *out_dev)
3468 {
3469 struct flow *flow = &ctx->xin->flow;
3470 struct flow old_flow = ctx->xin->flow;
3471 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3472 bool old_conntrack = ctx->conntracked;
3473 bool old_was_mpls = ctx->was_mpls;
3474 ovs_version_t old_version = ctx->xin->tables_version;
3475 struct ofpbuf old_stack = ctx->stack;
3476 uint8_t new_stack[1024];
3477 struct ofpbuf old_action_set = ctx->action_set;
3478 struct ovs_list *old_trace = ctx->xin->trace;
3479 uint64_t actset_stub[1024 / 8];
3480
3481 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3482 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3483 flow->in_port.ofp_port = out_dev->ofp_port;
3484 flow->metadata = htonll(0);
3485 memset(&flow->tunnel, 0, sizeof flow->tunnel);
3486 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
3487 flow->tunnel.metadata.tab =
3488 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3489 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3490 memset(flow->regs, 0, sizeof flow->regs);
3491 flow->actset_output = OFPP_UNSET;
3492 clear_conntrack(ctx);
3493 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3494 out_dev->xbridge->name);
3495 mirror_mask_t old_mirrors = ctx->mirrors;
3496 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3497 if (independent_mirrors) {
3498 ctx->mirrors = 0;
3499 }
3500 ctx->xbridge = out_dev->xbridge;
3501
3502 /* The bridge is now known so obtain its table version. */
3503 ctx->xin->tables_version
3504 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3505
3506 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3507 if (xport_stp_forward_state(out_dev) &&
3508 xport_rstp_forward_state(out_dev)) {
3509 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3510 false, true, clone_xlate_actions);
3511 if (!ctx->freezing) {
3512 xlate_action_set(ctx);
3513 }
3514 if (ctx->freezing) {
3515 finish_freezing(ctx);
3516 }
3517 } else {
3518 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3519 * the learning action look at the packet, then drop it. */
3520 struct flow old_base_flow = ctx->base_flow;
3521 size_t old_size = ctx->odp_actions->size;
3522 mirror_mask_t old_mirrors2 = ctx->mirrors;
3523
3524 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3525 false, true, clone_xlate_actions);
3526 ctx->mirrors = old_mirrors2;
3527 ctx->base_flow = old_base_flow;
3528 ctx->odp_actions->size = old_size;
3529
3530 /* Undo changes that may have been done for freezing. */
3531 ctx_cancel_freeze(ctx);
3532 }
3533 }
3534
3535 ctx->xin->trace = old_trace;
3536 if (independent_mirrors) {
3537 ctx->mirrors = old_mirrors;
3538 }
3539 ctx->xin->flow = old_flow;
3540 ctx->xbridge = in_dev->xbridge;
3541 ofpbuf_uninit(&ctx->action_set);
3542 ctx->action_set = old_action_set;
3543 ofpbuf_uninit(&ctx->stack);
3544 ctx->stack = old_stack;
3545
3546 /* Restore calling bridge's lookup version. */
3547 ctx->xin->tables_version = old_version;
3548
3549     /* Restore the calling bridge's tunneling information. */
3550 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3551
3552 /* The out bridge popping MPLS should have no effect on the original
3553 * bridge. */
3554 ctx->was_mpls = old_was_mpls;
3555
3556 /* The out bridge's conntrack execution should have no effect on the
3557 * original bridge. */
3558 ctx->conntracked = old_conntrack;
3559
3560 /* The fact that the out bridge exits (for any reason) does not mean
3561 * that the original bridge should exit. Specifically, if the out
3562 * bridge freezes translation, the original bridge must continue
3563 * processing with the original, not the frozen packet! */
3564 ctx->exit = false;
3565
3566 /* Out bridge errors do not propagate back. */
3567 ctx->error = XLATE_OK;
3568
3569 if (ctx->xin->resubmit_stats) {
3570 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3571 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3572 if (out_dev->bfd) {
3573 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3574 }
3575 }
3576 if (ctx->xin->xcache) {
3577 struct xc_entry *entry;
3578
3579 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3580 entry->dev.tx = netdev_ref(in_dev->netdev);
3581 entry->dev.rx = netdev_ref(out_dev->netdev);
3582 entry->dev.bfd = bfd_ref(out_dev->bfd);
3583 }
3584 }
3585
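/* Returns true if output to 'xport' is allowed for 'flow': the port must
 * exist, forwarding must not be disabled on it, the flow must not be between
 * protected ports, STP/RSTP state must permit output (when 'check_stp'), and
 * the packet type must be compatible with the port.  Otherwise reports the
 * reason and returns false. */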
3586 static bool
3587 check_output_prerequisites(struct xlate_ctx *ctx,
3588 const struct xport *xport,
3589 struct flow *flow,
3590 bool check_stp)
3591 {
3592 struct flow_wildcards *wc = ctx->wc;
3593
3594 if (!xport) {
3595 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
3596 return false;
3597 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
3598 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
3599 return false;
3600 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
3601 xlate_report(ctx, OFT_WARN,
3602 "Mirror truncate to ODPP_NONE, skipping output");
3603 return false;
3604 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
3605 xlate_report(ctx, OFT_WARN,
3606 "Flow is between protected ports, skipping output.");
3607 return false;
3608 } else if (check_stp) {
3609 if (is_stp(&ctx->base_flow)) {
3610 if (!xport_stp_should_forward_bpdu(xport) &&
3611 !xport_rstp_should_manage_bpdu(xport)) {
3612 if (ctx->xbridge->stp != NULL) {
3613 xlate_report(ctx, OFT_WARN,
3614 "STP not in listening state, "
3615 "skipping bpdu output");
3616 } else if (ctx->xbridge->rstp != NULL) {
3617 xlate_report(ctx, OFT_WARN,
3618 "RSTP not managing BPDU in this state, "
3619 "skipping bpdu output");
3620 }
3621 return false;
3622 }
3623 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3624 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3625 wc))) {
3626 /* Pass; STP should not block link health detection. */
3627 } else if (!xport_stp_forward_state(xport) ||
3628 !xport_rstp_forward_state(xport)) {
3629 if (ctx->xbridge->stp != NULL) {
3630 xlate_report(ctx, OFT_WARN,
3631 "STP not in forwarding state, skipping output");
3632 } else if (ctx->xbridge->rstp != NULL) {
3633 xlate_report(ctx, OFT_WARN,
3634 "RSTP not in forwarding state, skipping output");
3635 }
3636 return false;
3637 }
3638 }
3639
3640 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3641 flow->packet_type != htonl(PT_ETH)) {
3642 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
3643 "through legacy L2 port. Dropping packet.");
3644 return false;
3645 }
3646
3647 return true;
3648 }
3649
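/* Checks whether a packet output to OFPP_LOCAL should instead be intercepted
 * as arriving on a native tunnel.  If so, stores the datapath tunnel port in
 * '*tnl_port' and returns true; otherwise sets it to ODPP_NONE and returns
 * false. */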
3650 static bool
3651 terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3652 struct flow *flow, struct flow_wildcards *wc,
3653 odp_port_t *tnl_port)
3654 {
3655 *tnl_port = ODPP_NONE;
3656
3657     /* XXX: Write a better filter for the tunnel port.  We can use the in_port
3658      * in the tunnel-port flow to avoid these checks completely. */
3659 if (ofp_port == OFPP_LOCAL &&
3660 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3661 *tnl_port = tnl_port_map_lookup(flow, wc);
3662 }
3663
3664 return *tnl_port != ODPP_NONE;
3665 }
3666
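/* Composes the datapath actions for output to OpenFlow port 'ofp_port',
 * handling patch ports, kernel and native tunnels, bond recirculation via
 * 'xr', mirroring and truncation.  Flow fields modified along the way are
 * restored before returning. */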
3667 static void
3668 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3669 const struct xlate_bond_recirc *xr, bool check_stp,
3670 bool is_last_action OVS_UNUSED, bool truncate)
3671 {
3672 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
3673 struct flow_wildcards *wc = ctx->wc;
3674 struct flow *flow = &ctx->xin->flow;
3675 struct flow_tnl flow_tnl;
3676 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
3677 uint8_t flow_nw_tos;
3678 odp_port_t out_port, odp_port, odp_tnl_port;
3679 bool is_native_tunnel = false;
3680 uint8_t dscp;
3681 struct eth_addr flow_dl_dst = flow->dl_dst;
3682 struct eth_addr flow_dl_src = flow->dl_src;
3683 ovs_be32 flow_packet_type = flow->packet_type;
3684 ovs_be16 flow_dl_type = flow->dl_type;
3685
3686 /* If 'struct flow' gets additional metadata, we'll need to zero it out
3687 * before traversing a patch port. */
3688 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 40);
3689 memset(&flow_tnl, 0, sizeof flow_tnl);
3690
3691 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
3692 return;
3693 }
3694
3695 if (flow->packet_type == htonl(PT_ETH)) {
3696 /* Strip Ethernet header for legacy L3 port. */
3697 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
3698 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
3699 ntohs(flow->dl_type));
3700 }
3701 }
3702
3703 if (xport->peer) {
3704 if (truncate) {
3705 xlate_report_error(ctx, "Cannot truncate output to patch port");
3706 }
3707 patch_port_output(ctx, xport, xport->peer);
3708 return;
3709 }
3710
3711 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
3712 flow_nw_tos = flow->nw_tos;
3713
3714 if (count_skb_priorities(xport)) {
3715 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3716 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
3717 wc->masks.nw_tos |= IP_DSCP_MASK;
3718 flow->nw_tos &= ~IP_DSCP_MASK;
3719 flow->nw_tos |= dscp;
3720 }
3721 }
3722
3723 if (xport->is_tunnel) {
3724 struct in6_addr dst;
3725 /* Save tunnel metadata so that changes made due to
3726          * the Logical (tunnel) Port are not visible to any further
3727 * matches, while explicit set actions on tunnel metadata are.
3728 */
3729 flow_tnl = flow->tunnel;
3730 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
3731 if (odp_port == ODPP_NONE) {
3732 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
3733 goto out; /* restore flow_nw_tos */
3734 }
3735 dst = flow_tnl_dst(&flow->tunnel);
3736 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
3737 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
3738 goto out; /* restore flow_nw_tos */
3739 }
3740 if (ctx->xin->resubmit_stats) {
3741 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
3742 }
3743 if (ctx->xin->xcache) {
3744 struct xc_entry *entry;
3745
3746 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3747 entry->dev.tx = netdev_ref(xport->netdev);
3748 }
3749 out_port = odp_port;
3750 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3751 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
3752 is_native_tunnel = true;
3753 } else {
3754 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
3755 commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
3756 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3757 }
3758 } else {
3759 odp_port = xport->odp_port;
3760 out_port = odp_port;
3761 }
3762
3763 if (out_port != ODPP_NONE) {
3764 /* Commit accumulated flow updates before output. */
3765 xlate_commit_actions(ctx);
3766
3767 if (xr) {
3768 /* Recirculate the packet. */
3769 struct ovs_action_hash *act_hash;
3770
3771 /* Hash action. */
3772 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
3773 OVS_ACTION_ATTR_HASH,
3774 sizeof *act_hash);
3775 act_hash->hash_alg = xr->hash_alg;
3776 act_hash->hash_basis = xr->hash_basis;
3777
3778 /* Recirc action. */
3779 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
3780 xr->recirc_id);
3781 } else if (is_native_tunnel) {
3782 /* Output to native tunnel port. */
3783 native_tunnel_output(ctx, xport, flow, odp_port, truncate);
3784 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3785
3786 } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
3787 &odp_tnl_port)) {
3788 /* Intercept packet to be received on native tunnel port. */
3789 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
3790 odp_tnl_port);
3791
3792 } else {
3793 /* Tunnel push-pop action is not compatible with
3794 * IPFIX action. */
3795 compose_ipfix_action(ctx, out_port);
3796
3797 /* Handle truncation of the mirrored packet. */
3798 if (ctx->mirror_snaplen > 0 &&
3799 ctx->mirror_snaplen < UINT16_MAX) {
3800 struct ovs_action_trunc *trunc;
3801
3802 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
3803 OVS_ACTION_ATTR_TRUNC,
3804 sizeof *trunc);
3805 trunc->max_len = ctx->mirror_snaplen;
3806 if (!ctx->xbridge->support.trunc) {
3807 ctx->xout->slow |= SLOW_ACTION;
3808 }
3809 }
3810
3811 nl_msg_put_odp_port(ctx->odp_actions,
3812 OVS_ACTION_ATTR_OUTPUT,
3813 out_port);
3814 }
3815
3816 ctx->sflow_odp_port = odp_port;
3817 ctx->sflow_n_outputs++;
3818 ctx->nf_output_iface = ofp_port;
3819 }
3820
3821 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
3822 mirror_packet(ctx, xport->xbundle,
3823 xbundle_mirror_dst(xport->xbundle->xbridge,
3824 xport->xbundle));
3825 }
3826
3827 out:
3828 /* Restore flow */
3829 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
3830 flow->nw_tos = flow_nw_tos;
3831 flow->dl_dst = flow_dl_dst;
3832 flow->dl_src = flow_dl_src;
3833 flow->packet_type = flow_packet_type;
3834 flow->dl_type = flow_dl_type;
3835 }
3836
3837 static void
3838 compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3839 const struct xlate_bond_recirc *xr,
3840 bool is_last_action, bool truncate)
3841 {
3842 compose_output_action__(ctx, ofp_port, xr, true,
3843 is_last_action, truncate);
3844 }
3845
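/* Translates the actions of 'rule' as the result of a resubmit, crediting
 * statistics to the rule.  'deepens' indicates whether this resubmit counts
 * against the translation depth limit. */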
3846 static void
3847 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
3848 bool deepens, bool is_last_action,
3849 xlate_actions_handler *actions_xlator)
3850 {
3851 struct rule_dpif *old_rule = ctx->rule;
3852 ovs_be64 old_cookie = ctx->rule_cookie;
3853 const struct rule_actions *actions;
3854
3855 if (ctx->xin->resubmit_stats) {
3856 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
3857 }
3858
3859 ctx->resubmits++;
3860
3861 ctx->depth += deepens;
3862 ctx->rule = rule;
3863 ctx->rule_cookie = rule->up.flow_cookie;
3864 actions = rule_get_actions(&rule->up);
3865 actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
3866 is_last_action);
3867 ctx->rule_cookie = old_cookie;
3868 ctx->rule = old_rule;
3869 ctx->depth -= deepens;
3870 }
3871
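/* Returns true if translation may continue with another resubmit or group
 * traversal.  Otherwise reports which resource limit (depth, resubmit count,
 * datapath action size or stack size) was exceeded, records an error or
 * forces an early exit, and returns false. */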
3872 static bool
3873 xlate_resubmit_resource_check(struct xlate_ctx *ctx)
3874 {
3875 if (ctx->depth >= MAX_DEPTH) {
3876 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
3877 ctx->error = XLATE_RECURSION_TOO_DEEP;
3878 } else if (ctx->resubmits >= MAX_RESUBMITS) {
3879 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
3880 ctx->error = XLATE_TOO_MANY_RESUBMITS;
3881 } else if (ctx->odp_actions->size > UINT16_MAX) {
3882 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
3883 /* NOT an error, as we'll be slow-pathing the flow in this case? */
3884 ctx->exit = true; /* XXX: translation still terminated! */
3885 } else if (ctx->stack.size >= 65536) {
3886 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
3887 ctx->error = XLATE_STACK_TOO_DEEP;
3888 } else {
3889 return true;
3890 }
3891
3892 return false;
3893 }
3894
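/* Swaps the packet 5-tuple with the conntrack original-direction 5-tuple in
 * 'flow'; tuple_swap() below additionally applies the same swap to the
 * wildcard masks.  Used when resubmitting with the conntrack original tuple
 * ('with_ct_orig'). */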
3895 static void
3896 tuple_swap_flow(struct flow *flow, bool ipv4)
3897 {
3898 uint8_t nw_proto = flow->nw_proto;
3899 flow->nw_proto = flow->ct_nw_proto;
3900 flow->ct_nw_proto = nw_proto;
3901
3902 if (ipv4) {
3903 ovs_be32 nw_src = flow->nw_src;
3904 flow->nw_src = flow->ct_nw_src;
3905 flow->ct_nw_src = nw_src;
3906
3907 ovs_be32 nw_dst = flow->nw_dst;
3908 flow->nw_dst = flow->ct_nw_dst;
3909 flow->ct_nw_dst = nw_dst;
3910 } else {
3911 struct in6_addr ipv6_src = flow->ipv6_src;
3912 flow->ipv6_src = flow->ct_ipv6_src;
3913 flow->ct_ipv6_src = ipv6_src;
3914
3915 struct in6_addr ipv6_dst = flow->ipv6_dst;
3916 flow->ipv6_dst = flow->ct_ipv6_dst;
3917 flow->ct_ipv6_dst = ipv6_dst;
3918 }
3919
3920 ovs_be16 tp_src = flow->tp_src;
3921 flow->tp_src = flow->ct_tp_src;
3922 flow->ct_tp_src = tp_src;
3923
3924 ovs_be16 tp_dst = flow->tp_dst;
3925 flow->tp_dst = flow->ct_tp_dst;
3926 flow->ct_tp_dst = tp_dst;
3927 }
3928
3929 static void
3930 tuple_swap(struct flow *flow, struct flow_wildcards *wc)
3931 {
3932 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
3933
3934 tuple_swap_flow(flow, ipv4);
3935 tuple_swap_flow(&wc->masks, ipv4);
3936 }
3937
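/* Looks up the flow in OpenFlow table 'table_id', optionally swapping in the
 * conntrack original-direction tuple for the lookup ('with_ct_orig'), and
 * recursively translates the matching rule's actions, subject to the
 * resource checks above. */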
3938 static void
3939 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
3940 bool may_packet_in, bool honor_table_miss,
3941 bool with_ct_orig, bool is_last_action,
3942 xlate_actions_handler *xlator)
3943 {
3944 /* Check if we need to recirculate before matching in a table. */
3945 if (ctx->was_mpls) {
3946 ctx_trigger_freeze(ctx);
3947 return;
3948 }
3949 if (xlate_resubmit_resource_check(ctx)) {
3950 uint8_t old_table_id = ctx->table_id;
3951 struct rule_dpif *rule;
3952
3953 ctx->table_id = table_id;
3954
3955 /* Swap packet fields with CT 5-tuple if requested. */
3956 if (with_ct_orig) {
3957 /* Do not swap if there is no CT tuple, or if key is not IP. */
3958 if (ctx->xin->flow.ct_nw_proto == 0 ||
3959 !is_ip_any(&ctx->xin->flow)) {
3960 xlate_report_error(ctx,
3961 "resubmit(ct) with non-tracked or non-IP packet!");
3962 return;
3963 }
3964 tuple_swap(&ctx->xin->flow, ctx->wc);
3965 }
3966 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
3967 ctx->xin->tables_version,
3968 &ctx->xin->flow, ctx->wc,
3969 ctx->xin->resubmit_stats,
3970 &ctx->table_id, in_port,
3971 may_packet_in, honor_table_miss,
3972 ctx->xin->xcache);
3973 /* Swap back. */
3974 if (with_ct_orig) {
3975 tuple_swap(&ctx->xin->flow, ctx->wc);
3976 }
3977
3978 if (rule) {
3979 /* Fill in the cache entry here instead of xlate_recursively
3980 * to make the reference counting more explicit. We take a
3981 * reference in the lookups above if we are going to cache the
3982 * rule. */
3983 if (ctx->xin->xcache) {
3984 struct xc_entry *entry;
3985
3986 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
3987 entry->rule = rule;
3988 ofproto_rule_ref(&rule->up);
3989 }
3990
3991 struct ovs_list *old_trace = ctx->xin->trace;
3992 xlate_report_table(ctx, rule, table_id);
3993 xlate_recursively(ctx, rule, table_id <= old_table_id,
3994 is_last_action, xlator);
3995 ctx->xin->trace = old_trace;
3996 }
3997
3998 ctx->table_id = old_table_id;
3999 return;
4000 }
4001 }
4002
4003 /* Consumes the group reference, which is only taken if xcache exists. */
4004 static void
4005 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4006 struct ofputil_bucket *bucket)
4007 {
4008 if (ctx->xin->resubmit_stats) {
4009 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4010 }
4011 if (ctx->xin->xcache) {
4012 struct xc_entry *entry;
4013
4014 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
4015 entry->group.group = group;
4016 entry->group.bucket = bucket;
4017 }
4018 }
4019
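/* Translates a single group 'bucket' by executing its actions as an action
 * set, then rolls back the flow, MPLS state and recoverable errors so that
 * later buckets and the actions following the group see the packet as it was
 * before the bucket ran. */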
4020 static void
4021 xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
4022 bool is_last_action)
4023 {
4024 uint64_t action_list_stub[1024 / 8];
4025 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4026 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4027 bucket->ofpacts_len);
4028 struct flow old_flow = ctx->xin->flow;
4029 bool old_was_mpls = ctx->was_mpls;
4030
4031 ofpacts_execute_action_set(&action_list, &action_set);
4032 ctx->depth++;
4033 do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action);
4034 ctx->depth--;
4035
4036 ofpbuf_uninit(&action_list);
4037
4038 /* Check if need to freeze. */
4039 if (ctx->freezing) {
4040 finish_freezing(ctx);
4041 }
4042
4043 /* Roll back flow to previous state.
4044 * This is equivalent to cloning the packet for each bucket.
4045 *
4046 * As a side effect any subsequently applied actions will
4047 * also effectively be applied to a clone of the packet taken
4048 * just before applying the all or indirect group.
4049 *
4050 * Note that group buckets are action sets, hence they cannot modify the
4051 * main action set. Also any stack actions are ignored when executing an
4052 * action set, so group buckets cannot change the stack either.
4053 * However, we do allow resubmit actions in group buckets, which could
4054      * break the above assumptions.  It is up to the controller not to mess
4055      * with the action_set and stack in the tables resubmitted to from
4056      * group buckets. */
4057 ctx->xin->flow = old_flow;
4058
4059 /* The group bucket popping MPLS should have no effect after bucket
4060 * execution. */
4061 ctx->was_mpls = old_was_mpls;
4062
4063 /* The fact that the group bucket exits (for any reason) does not mean that
4064 * the translation after the group action should exit. Specifically, if
4065 * the group bucket freezes translation, the actions after the group action
4066 * must continue processing with the original, not the frozen packet! */
4067 ctx->exit = false;
4068
4069     /* A context error in a bucket should not impact processing of other buckets
4070      * or actions.  This is similar to cloning a packet for group buckets.
4071      * There is no need to restore the error back to the old value because we
4072      * actually processed the group action, which can only happen when there
4073      * is no previous context error.
4074      *
4075      * The exception to the above is errors that are system limits protecting
4076      * translation from running too long or occupying too much space.  These
4077      * errors should not be masked.  XLATE_RECURSION_TOO_DEEP,
4078      * XLATE_TOO_MANY_RESUBMITS and XLATE_STACK_TOO_DEEP fall into this category. */
4079 if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
4080 ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
4081 /* reset the error and continue processing other buckets */
4082 ctx->error = XLATE_OK;
4083 }
4084 }
4085
4086 static void
4087 xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group,
4088 bool is_last_action)
4089 {
4090 struct ofputil_bucket *bucket;
4091 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
4092 bool last = is_last_action && !bucket->list_node.next;
4093 xlate_group_bucket(ctx, bucket, last);
4094 }
4095 xlate_group_stats(ctx, group, NULL);
4096 }
4097
4098 static void
4099 xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group,
4100 bool is_last_action)
4101 {
4102 struct ofputil_bucket *bucket;
4103
4104 bucket = group_first_live_bucket(ctx, group, 0);
4105 if (bucket) {
4106 xlate_group_bucket(ctx, bucket, is_last_action);
4107 xlate_group_stats(ctx, group, bucket);
4108 } else if (ctx->xin->xcache) {
4109 ofproto_group_unref(&group->up);
4110 }
4111 }
4112
4113 static void
4114 xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4115 bool is_last_action)
4116 {
4117 struct flow_wildcards *wc = ctx->wc;
4118 struct ofputil_bucket *bucket;
4119 uint32_t basis;
4120
4121 basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
4122 flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
4123 bucket = group_best_live_bucket(ctx, group, basis);
4124 if (bucket) {
4125 xlate_group_bucket(ctx, bucket, is_last_action);
4126 xlate_group_stats(ctx, group, bucket);
4127 } else if (ctx->xin->xcache) {
4128 ofproto_group_unref(&group->up);
4129 }
4130 }
4131
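/* Selects a bucket for a select group that uses the "hash" selection method:
 * hashes the configured, masked fields of the flow and translates the best
 * live bucket for the resulting hash value. */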
4132 static void
4133 xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4134 bool is_last_action)
4135 {
4136 const struct field_array *fields = &group->up.props.fields;
4137 const uint8_t *mask_values = fields->values;
4138 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
4139
4140 size_t i;
4141 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4142 const struct mf_field *mf = mf_from_id(i);
4143
4144 /* Skip fields for which prerequisites are not met. */
4145 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4146 /* Skip the mask bytes for this field. */
4147 mask_values += mf->n_bytes;
4148 continue;
4149 }
4150
4151 union mf_value value;
4152 union mf_value mask;
4153
4154 mf_get_value(mf, &ctx->xin->flow, &value);
4155 /* Mask the value. */
4156 for (int j = 0; j < mf->n_bytes; j++) {
4157 mask.b[j] = *mask_values++;
4158 value.b[j] &= mask.b[j];
4159 }
4160 basis = hash_bytes(&value, mf->n_bytes, basis);
4161
4162 /* For tunnels, hash in whether the field is present. */
4163 if (mf_is_tun_metadata(mf)) {
4164 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
4165 }
4166
4167 mf_mask_field_masked(mf, &mask, ctx->wc);
4168 }
4169
4170 struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
4171 if (bucket) {
4172 xlate_group_bucket(ctx, bucket, is_last_action);
4173 xlate_group_stats(ctx, group, bucket);
4174 } else if (ctx->xin->xcache) {
4175 ofproto_group_unref(&group->up);
4176 }
4177 }
4178
4179 static void
4180 xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4181 bool is_last_action)
4182 {
4183 struct ofputil_bucket *bucket;
4184
4185 /* dp_hash value 0 is special since it means that the dp_hash has not been
4186 * computed, as all computed dp_hash values are non-zero. Therefore
4187 * compare to zero can be used to decide if the dp_hash value is valid
4188 * without masking the dp_hash field. */
4189 if (!ctx->xin->flow.dp_hash) {
4190 uint64_t param = group->up.props.selection_method_param;
4191
4192 ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
4193 } else {
4194 uint32_t n_buckets = group->up.n_buckets;
4195 if (n_buckets) {
4196 /* Minimal mask to cover the number of buckets. */
4197 uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
4198             /* Multiplier chosen to make the trivial 1-bit case
4199              * actually distribute amongst two equal-weight buckets. */
4200 uint32_t basis = 0xc2b73583 * (ctx->xin->flow.dp_hash & mask);
4201
4202 ctx->wc->masks.dp_hash |= mask;
4203 bucket = group_best_live_bucket(ctx, group, basis);
4204 if (bucket) {
4205 xlate_group_bucket(ctx, bucket, is_last_action);
4206 xlate_group_stats(ctx, group, bucket);
4207 }
4208 }
4209 }
4210 }
4211
4212 static void
4213 xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group,
4214 bool is_last_action)
4215 {
4216 const char *selection_method = group->up.props.selection_method;
4217
4218 /* Select groups may access flow keys beyond L2 in order to
4219 * select a bucket. Recirculate as appropriate to make this possible.
4220 */
4221 if (ctx->was_mpls) {
4222 ctx_trigger_freeze(ctx);
4223 }
4224
4225 if (selection_method[0] == '\0') {
4226 xlate_default_select_group(ctx, group, is_last_action);
4227 } else if (!strcasecmp("hash", selection_method)) {
4228 xlate_hash_fields_select_group(ctx, group, is_last_action);
4229 } else if (!strcasecmp("dp_hash", selection_method)) {
4230 xlate_dp_hash_select_group(ctx, group, is_last_action);
4231 } else {
4232 /* Parsing of groups should ensure this never happens */
4233 OVS_NOT_REACHED();
4234 }
4235 }
4236
4237 static void
4238 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4239 bool is_last_action)
4240 {
4241 bool was_in_group = ctx->in_group;
4242 ctx->in_group = true;
4243
4244 switch (group->up.type) {
4245 case OFPGT11_ALL:
4246 case OFPGT11_INDIRECT:
4247 xlate_all_group(ctx, group, is_last_action);
4248 break;
4249 case OFPGT11_SELECT:
4250 xlate_select_group(ctx, group, is_last_action);
4251 break;
4252 case OFPGT11_FF:
4253 xlate_ff_group(ctx, group, is_last_action);
4254 break;
4255 default:
4256 OVS_NOT_REACHED();
4257 }
4258
4259 ctx->in_group = was_in_group;
4260 }
4261
4262 static bool
4263 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4264 bool is_last_action)
4265 {
4266 if (xlate_resubmit_resource_check(ctx)) {
4267 struct group_dpif *group;
4268
4269 /* Take ref only if xcache exists. */
4270 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
4271 ctx->xin->tables_version, ctx->xin->xcache);
4272 if (!group) {
4273 /* XXX: Should set ctx->error ? */
4274 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4275 group_id);
4276 return true;
4277 }
4278 xlate_group_action__(ctx, group, is_last_action);
4279 }
4280
4281 return false;
4282 }
4283
4284 static void
4285 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
4286 const struct ofpact_resubmit *resubmit,
4287 bool is_last_action)
4288 {
4289 ofp_port_t in_port;
4290 uint8_t table_id;
4291 bool may_packet_in = false;
4292 bool honor_table_miss = false;
4293
4294 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4295 /* Still allow missed packets to be sent to the controller
4296 * if resubmitting from an internal table. */
4297 may_packet_in = true;
4298 honor_table_miss = true;
4299 }
4300
4301 in_port = resubmit->in_port;
4302 if (in_port == OFPP_IN_PORT) {
4303 in_port = ctx->xin->flow.in_port.ofp_port;
4304 }
4305
4306 table_id = resubmit->table_id;
4307 if (table_id == 255) {
4308 table_id = ctx->table_id;
4309 }
4310
4311 xlate_table_action(ctx, in_port, table_id, may_packet_in,
4312 honor_table_miss, resubmit->with_ct_orig,
4313 is_last_action, do_xlate_actions);
4314 }
4315
4316 static void
4317 flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4318 bool all, bool is_last_action)
4319 {
4320 if (!xport) {
4321 return;
4322 }
4323
4324 if (all) {
4325 compose_output_action__(ctx, xport->ofp_port, NULL, false,
4326 is_last_action, false);
4327 } else {
4328 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4329 false);
4330 }
4331 }
4332
4333 static void
4334 flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4335 {
4336 const struct xport *xport, *last = NULL;
4337
4338     /* Use 'last' to keep track of the last output port. */
4339 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4340 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
4341 continue;
4342 }
4343
4344 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4345             /* 'last' is not the final output port; send a packet out of it
4346              * now and update 'last'. */
4347 flood_packet_to_port(ctx, last, all, false);
4348 last = xport;
4349 }
4350 }
4351
4352 /* Send the packet to the 'last' port. */
4353 flood_packet_to_port(ctx, last, all, is_last_action);
4354 ctx->nf_output_iface = NF_OUT_FLOOD;
4355 }
4356
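/* Composes a userspace datapath action whose cookie carries the information
 * needed to build a packet-in (or continuation) message for the controller
 * identified by 'controller_id'. */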
4357 static void
4358 put_controller_user_action(struct xlate_ctx *ctx,
4359 bool dont_send, bool continuation,
4360 uint32_t recirc_id, int len,
4361 enum ofp_packet_in_reason reason,
4362 uint16_t controller_id)
4363 {
4364 struct user_action_cookie cookie;
4365
4366 memset(&cookie, 0, sizeof cookie);
4367 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4368     cookie.ofp_in_port = OFPP_NONE;
4369 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4370 cookie.controller.dont_send = dont_send;
4371 cookie.controller.continuation = continuation;
4372 cookie.controller.reason = reason;
4373 cookie.controller.recirc_id = recirc_id;
4374 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4375 cookie.controller.controller_id = controller_id;
4376 cookie.controller.max_len = len;
4377
4378 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4379 ctx->xin->flow.in_port.ofp_port);
4380 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
4381 flow_hash_5tuple(&ctx->xin->flow, 0));
4382 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4383 false, ctx->odp_actions);
4384 }
4385
4386 static void
4387 xlate_controller_action(struct xlate_ctx *ctx, int len,
4388 enum ofp_packet_in_reason reason,
4389 uint16_t controller_id,
4390 const uint8_t *userdata, size_t userdata_len)
4391 {
4392 xlate_commit_actions(ctx);
4393
4394 /* A packet sent by an action in a table-miss rule is considered an
4395 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4396 * it will get translated back to OFPR_ACTION for those versions. */
4397 if (reason == OFPR_ACTION
4398 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
4399 reason = OFPR_EXPLICIT_MISS;
4400 }
4401
4402 struct frozen_state state = {
4403 .table_id = ctx->table_id,
4404 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4405 .stack = ctx->stack.data,
4406 .stack_size = ctx->stack.size,
4407 .mirrors = ctx->mirrors,
4408 .conntracked = ctx->conntracked,
4409 .ofpacts = NULL,
4410 .ofpacts_len = 0,
4411 .action_set = NULL,
4412 .action_set_len = 0,
4413 .userdata = CONST_CAST(uint8_t *, userdata),
4414 .userdata_len = userdata_len,
4415 };
4416 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4417
4418 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4419 if (!recirc_id) {
4420 xlate_report_error(ctx, "Failed to allocate recirculation id");
4421 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4422 return;
4423 }
4424 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4425
4426 size_t offset;
4427 size_t ac_offset;
4428 uint32_t meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4429 if (meter_id != UINT32_MAX) {
4430         /* If a controller meter is configured, wrap the meter and userspace
4431          * actions in a 100% sample action, which acts as a clone. */
4432 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4433 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4434 UINT32_MAX);
4435 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4436 OVS_SAMPLE_ATTR_ACTIONS);
4437 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
4438 }
4439
4440 /* Generate the datapath flows even if we don't send the packet-in
4441 * so that debugging more closely represents normal state. */
4442 bool dont_send = false;
4443 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4444 dont_send = true;
4445 }
4446 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4447 reason, controller_id);
4448
4449 if (meter_id != UINT32_MAX) {
4450 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4451 nl_msg_end_nested(ctx->odp_actions, offset);
4452 }
4453 }
4454
4455 /* Creates a frozen state, and allocates a unique recirc id for the given
4456 * state. Returns a non-zero recirc id if it is allocated successfully.
4457 * Returns 0 otherwise.
4458  */
4459 static uint32_t
4460 finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4461 {
4462 ovs_assert(ctx->freezing);
4463
4464 struct frozen_state state = {
4465 .table_id = table,
4466 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4467 .stack = ctx->stack.data,
4468 .stack_size = ctx->stack.size,
4469 .mirrors = ctx->mirrors,
4470 .conntracked = ctx->conntracked,
4471 .ofpacts = ctx->frozen_actions.data,
4472 .ofpacts_len = ctx->frozen_actions.size,
4473 .action_set = ctx->action_set.data,
4474 .action_set_len = ctx->action_set.size,
4475 .userdata = ctx->pause ? CONST_CAST(uint8_t *,ctx->pause->userdata)
4476 : NULL,
4477 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
4478 };
4479 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4480
4481 /* Allocate a unique recirc id for the given metadata state in the
4482 * flow. An existing id, with a new reference to the corresponding
4483 * recirculation context, will be returned if possible.
4484 * The life-cycle of this recirc id is managed by associating it
4485 * with the udpif key ('ukey') created for each new datapath flow. */
4486 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4487 if (!recirc_id) {
4488 xlate_report_error(ctx, "Failed to allocate recirculation id");
4489 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4490 return 0;
4491 }
4492 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4493
4494 if (ctx->pause) {
4495 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4496 return 0;
4497 }
4498
4499 put_controller_user_action(ctx, false, true, recirc_id,
4500 ctx->pause->max_len,
4501 ctx->pause->reason,
4502 ctx->pause->controller_id);
4503 } else {
4504 if (ctx->recirc_update_dp_hash) {
4505 struct ovs_action_hash *act_hash;
4506
4507 /* Hash action. */
4508 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4509 OVS_ACTION_ATTR_HASH,
4510 sizeof *act_hash);
4511 act_hash->hash_alg = OVS_HASH_ALG_L4; /* Make configurable. */
4512 act_hash->hash_basis = 0; /* Make configurable. */
4513 }
4514 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
4515 }
4516
4517 /* Undo changes done by freezing. */
4518 ctx_cancel_freeze(ctx);
4519 return recirc_id;
4520 }
4521
4522 /* Called only when we're freezing. */
4523 static void
4524 finish_freezing(struct xlate_ctx *ctx)
4525 {
4526 xlate_commit_actions(ctx);
4527 finish_freezing__(ctx, 0);
4528 }
4529
4530 /* Fork the pipeline here. The current packet will continue processing the
4531 * current action list. A clone of the current packet will recirculate, skip
4532 * the remainder of the current action list and asynchronously resume pipeline
4533 * processing in 'table' with the current metadata and action set. */
4534 static void
4535 compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
4536 const uint16_t zone)
4537 {
4538 uint32_t recirc_id;
4539 ctx->freezing = true;
4540 recirc_id = finish_freezing__(ctx, table);
4541
4542 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4543 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4544 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
4545 ctx->xin->packet, recirc_id, zone)) {
4546 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4547 "recirculate. The forked pipeline will be resumed at "
4548 "table %u.", table);
4549 } else {
4550 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4551 "forked pipeline with recirc_id = %d.", recirc_id);
4552 }
4553 }
4554 }
4555
4556 static void
4557 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
4558 {
4559 struct flow *flow = &ctx->xin->flow;
4560 int n;
4561
4562 ovs_assert(eth_type_mpls(mpls->ethertype));
4563
4564 n = flow_count_mpls_labels(flow, ctx->wc);
4565 if (!n) {
4566 xlate_commit_actions(ctx);
4567 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4568 if (ctx->xin->packet != NULL) {
4569 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4570 "action can't be performed as it would have "
4571 "more MPLS LSEs than the %d supported.",
4572 FLOW_MAX_MPLS_LABELS);
4573 }
4574 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4575 return;
4576 }
4577
4578 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4579 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
4580 }
4581
4582 static void
4583 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
4584 {
4585 struct flow *flow = &ctx->xin->flow;
4586 int n = flow_count_mpls_labels(flow, ctx->wc);
4587
4588 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
4589 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
4590 ctx->was_mpls = true;
4591 }
4592 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4593 if (ctx->xin->packet != NULL) {
4594 xlate_report_error(ctx, "dropping packet on which an "
4595 "MPLS pop action can't be performed as it has "
4596 "more MPLS LSEs than the %d supported.",
4597 FLOW_MAX_MPLS_LABELS);
4598 }
4599 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4600 ofpbuf_clear(ctx->odp_actions);
4601 }
4602 }
4603
4604 static bool
4605 compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4606 {
4607 struct flow *flow = &ctx->xin->flow;
4608
4609 if (!is_ip_any(flow)) {
4610 return false;
4611 }
4612
4613 ctx->wc->masks.nw_ttl = 0xff;
4614 if (flow->nw_ttl > 1) {
4615 flow->nw_ttl--;
4616 return false;
4617 } else {
4618 size_t i;
4619
4620 for (i = 0; i < ids->n_controllers; i++) {
4621 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4622 ids->cnt_ids[i], NULL, 0);
4623 }
4624
4625 /* Stop processing for current table. */
4626 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4627 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
4628 return true;
4629 }
4630 }
4631
4632 static void
4633 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
4634 {
4635 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4636 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
4637 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
4638 }
4639 }
4640
4641 static void
4642 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
4643 {
4644 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4645 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
4646 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
4647 }
4648 }
4649
4650 static bool
4651 compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
4652 {
4653 struct flow *flow = &ctx->xin->flow;
4654
4655 if ((flow->packet_type == htonl(PT_NSH)) ||
4656 (flow->dl_type == htons(ETH_TYPE_NSH))) {
4657 ctx->wc->masks.nsh.ttl = 0xff;
4658 if (flow->nsh.ttl > 1) {
4659 flow->nsh.ttl--;
4660 return false;
4661 } else {
4662 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4663 0, NULL, 0);
4664 }
4665 }
4666
4667 /* Stop processing for current table. */
4668 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
4669 return true;
4670 }
4671
4672 static void
4673 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
4674 {
4675 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4676 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
4677 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
4678 }
4679 }
4680
4681 static bool
4682 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
4683 {
4684 struct flow *flow = &ctx->xin->flow;
4685
4686 if (eth_type_mpls(flow->dl_type)) {
4687 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
4688
4689 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
4690 if (ttl > 1) {
4691 ttl--;
4692 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
4693 return false;
4694 } else {
4695 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
4696 NULL, 0);
4697 }
4698 }
4699
4700 /* Stop processing for current table. */
4701 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
4702 return true;
4703 }
4704
4705 /* Emits an action that outputs to 'port', within 'ctx'.
4706 *
4707 * 'controller_len' affects only packets sent to an OpenFlow controller. It
4708 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
4709 * send the whole packet (and 0 means to omit the packet entirely).
4710 *
4711 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
4712 * controller. If it is false, then the packet is never sent to the OpenFlow
4713 * controller.
4714 *
4715 * 'is_last_action' should be true if this output is the last OpenFlow action
4716 * to be processed, which enables certain optimizations.
4717 *
4718 * 'truncate' should be true if the packet to be output is being truncated,
4719 * which suppresses certain optimizations. */
4720 static void
4721 xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
4722 uint16_t controller_len, bool may_packet_in,
4723 bool is_last_action, bool truncate)
4724 {
4725 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
4726
4727 ctx->nf_output_iface = NF_OUT_DROP;
4728
4729 switch (port) {
4730 case OFPP_IN_PORT:
4731 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
4732 is_last_action, truncate);
4733 break;
4734 case OFPP_TABLE:
4735 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
4736 0, may_packet_in, true, false, false,
4737 do_xlate_actions);
4738 break;
4739 case OFPP_NORMAL:
4740 xlate_normal(ctx);
4741 break;
4742 case OFPP_FLOOD:
4743 flood_packets(ctx, false, is_last_action);
4744 break;
4745 case OFPP_ALL:
4746 flood_packets(ctx, true, is_last_action);
4747 break;
4748 case OFPP_CONTROLLER:
4749 xlate_controller_action(ctx, controller_len,
4750 (ctx->in_packet_out ? OFPR_PACKET_OUT
4751 : ctx->in_group ? OFPR_GROUP
4752 : ctx->in_action_set ? OFPR_ACTION_SET
4753 : OFPR_ACTION),
4754 0, NULL, 0);
4755 break;
4756 case OFPP_NONE:
4757 break;
4758 case OFPP_LOCAL:
4759 default:
4760 if (port != ctx->xin->flow.in_port.ofp_port) {
4761 compose_output_action(ctx, port, NULL, is_last_action, truncate);
4762 } else {
4763 xlate_report(ctx, OFT_WARN, "skipping output to input port");
4764 }
4765 break;
4766 }
4767
4768 if (prev_nf_output_iface == NF_OUT_FLOOD) {
4769 ctx->nf_output_iface = NF_OUT_FLOOD;
4770 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4771 ctx->nf_output_iface = prev_nf_output_iface;
4772 } else if (prev_nf_output_iface != NF_OUT_DROP &&
4773 ctx->nf_output_iface != NF_OUT_FLOOD) {
4774 ctx->nf_output_iface = NF_OUT_MULTI;
4775 }
4776 }
4777
4778 static void
4779 xlate_output_reg_action(struct xlate_ctx *ctx,
4780 const struct ofpact_output_reg *or,
4781 bool is_last_action)
4782 {
4783 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
4784 if (port <= UINT16_MAX) {
4785 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
4786
4787 union mf_subvalue value;
4788
4789 memset(&value, 0xff, sizeof value);
4790 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
4791 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
4792 false, is_last_action, false);
4793 } else {
4794 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
4795 port);
4796 }
4797 }
4798
4799 static void
4800 xlate_output_trunc_action(struct xlate_ctx *ctx,
4801 ofp_port_t port, uint32_t max_len,
4802 bool is_last_action)
4803 {
4804 bool support_trunc = ctx->xbridge->support.trunc;
4805 struct ovs_action_trunc *trunc;
4806 char name[OFP10_MAX_PORT_NAME_LEN];
4807
4808 switch (port) {
4809 case OFPP_TABLE:
4810 case OFPP_NORMAL:
4811 case OFPP_FLOOD:
4812 case OFPP_ALL:
4813 case OFPP_CONTROLLER:
4814 case OFPP_NONE:
4815 ofputil_port_to_string(port, NULL, name, sizeof name);
4816 xlate_report(ctx, OFT_WARN,
4817 "output_trunc does not support port: %s", name);
4818 break;
4819 case OFPP_LOCAL:
4820 case OFPP_IN_PORT:
4821 default:
4822 if (port != ctx->xin->flow.in_port.ofp_port) {
4823 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
4824
4825 if (xport == NULL || xport->odp_port == ODPP_NONE) {
4826                 /* Since truncation happens at the following output action, if
4827                  * the output port is a patch port, the behavior is somewhat
4828                  * unpredictable.  For simplicity, disallow this case. */
4829 ofputil_port_to_string(port, NULL, name, sizeof name);
4830 xlate_report_error(ctx, "output_trunc does not support "
4831 "patch port %s", name);
4832 break;
4833 }
4834
4835 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4836 OVS_ACTION_ATTR_TRUNC,
4837 sizeof *trunc);
4838 trunc->max_len = max_len;
4839 xlate_output_action(ctx, port, 0, false, is_last_action, true);
4840 if (!support_trunc) {
4841 ctx->xout->slow |= SLOW_ACTION;
4842 }
4843 } else {
4844 xlate_report(ctx, OFT_WARN, "skipping output to input port");
4845 }
4846 break;
4847 }
4848 }
4849
4850 static void
4851 xlate_enqueue_action(struct xlate_ctx *ctx,
4852 const struct ofpact_enqueue *enqueue,
4853 bool is_last_action)
4854 {
4855 ofp_port_t ofp_port = enqueue->port;
4856 uint32_t queue_id = enqueue->queue;
4857 uint32_t flow_priority, priority;
4858 int error;
4859
4860 /* Translate queue to priority. */
4861 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
4862 if (error) {
4863 /* Fall back to ordinary output action. */
4864 xlate_output_action(ctx, enqueue->port, 0, false,
4865 is_last_action, false);
4866 return;
4867 }
4868
4869 /* Check output port. */
4870 if (ofp_port == OFPP_IN_PORT) {
4871 ofp_port = ctx->xin->flow.in_port.ofp_port;
4872 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
4873 return;
4874 }
4875
4876 /* Add datapath actions. */
4877 flow_priority = ctx->xin->flow.skb_priority;
4878 ctx->xin->flow.skb_priority = priority;
4879 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
4880 ctx->xin->flow.skb_priority = flow_priority;
4881
4882 /* Update NetFlow output port. */
4883 if (ctx->nf_output_iface == NF_OUT_DROP) {
4884 ctx->nf_output_iface = ofp_port;
4885 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
4886 ctx->nf_output_iface = NF_OUT_MULTI;
4887 }
4888 }
4889
4890 static void
4891 xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
4892 {
4893 uint32_t skb_priority;
4894
4895 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
4896 ctx->xin->flow.skb_priority = skb_priority;
4897 } else {
4898 /* Couldn't translate queue to a priority. Nothing to do. A warning
4899 * has already been logged. */
4900 }
4901 }
4902
4903 static bool
4904 slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
4905 {
4906 const struct xbridge *xbridge = xbridge_;
4907 struct xport *port;
4908
4909 switch (ofp_port) {
4910 case OFPP_IN_PORT:
4911 case OFPP_TABLE:
4912 case OFPP_NORMAL:
4913 case OFPP_FLOOD:
4914 case OFPP_ALL:
4915 case OFPP_NONE:
4916 return true;
4917 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
4918 return false;
4919 default:
4920 port = get_ofp_port(xbridge, ofp_port);
4921 return port ? port->may_enable : false;
4922 }
4923 }
4924
4925 static void
4926 xlate_bundle_action(struct xlate_ctx *ctx,
4927 const struct ofpact_bundle *bundle,
4928 bool is_last_action)
4929 {
4930 ofp_port_t port;
4931
4932 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
4933 CONST_CAST(struct xbridge *, ctx->xbridge));
4934 if (bundle->dst.field) {
4935 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
4936 xlate_report_subfield(ctx, &bundle->dst);
4937 } else {
4938 xlate_output_action(ctx, port, 0, false, is_last_action, false);
4939 }
4940 }
4941
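/* Implements the "learn" action.  Builds the flow mod described by 'learn'
 * from the current flow and, when side effects are allowed, installs it (or
 * defers it via the xlate cache); also unwildcards the fields that the learn
 * action reads. */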
4942 static void
4943 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
4944 {
4945 learn_mask(learn, ctx->wc);
4946
4947 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
4948 uint64_t ofpacts_stub[1024 / 8];
4949 struct ofputil_flow_mod fm;
4950 struct ofproto_flow_mod ofm__, *ofm;
4951 struct ofpbuf ofpacts;
4952 enum ofperr error;
4953
4954 if (ctx->xin->xcache) {
4955 ofm = xmalloc(sizeof *ofm);
4956 } else {
4957 ofm = &ofm__;
4958 }
4959
4960 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
4961 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
4962 if (OVS_UNLIKELY(ctx->xin->trace)) {
4963 struct ds s = DS_EMPTY_INITIALIZER;
4964 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
4965 match_format(&fm.match, NULL, &s, OFP_DEFAULT_PRIORITY);
4966 ds_chomp(&s, ' ');
4967 ds_put_format(&s, " priority=%d", fm.priority);
4968 if (fm.new_cookie) {
4969 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
4970 }
4971 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
4972 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
4973 }
4974 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
4975 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
4976 }
4977 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
4978 ds_put_cstr(&s, " send_flow_rem");
4979 }
4980 ds_put_cstr(&s, " actions=");
4981 ofpacts_format(fm.ofpacts, fm.ofpacts_len, NULL, &s);
4982 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
4983 ds_destroy(&s);
4984 }
4985 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
4986 &fm, ofm);
4987 ofpbuf_uninit(&ofpacts);
4988
4989 if (!error) {
4990 bool success = true;
4991 if (ctx->xin->allow_side_effects) {
4992 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
4993 learn->limit, &success);
4994 } else if (learn->limit) {
4995 if (!ofm->temp_rule
4996 || ofm->temp_rule->state != RULE_INSERTED) {
4997 /* The learned rule expired and there are no packets, so
4998 * we cannot learn again. Since the translated actions
4999 * depend on the result of learning, we tell the caller
5000 * that there's no point in caching this result. */
5001 ctx->xout->avoid_caching = true;
5002 }
5003 }
5004
5005 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5006 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5007 &ctx->xin->flow, ctx->wc);
5008 xlate_report_subfield(ctx, &learn->result_dst);
5009 }
5010
5011 if (success && ctx->xin->xcache) {
5012 struct xc_entry *entry;
5013
5014 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5015 entry->learn.ofm = ofm;
5016 entry->learn.limit = learn->limit;
5017 ofm = NULL;
5018 } else {
5019 ofproto_flow_mod_uninit(ofm);
5020 }
5021
5022 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5023 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5024 }
5025 }
5026
5027 if (ofm != &ofm__) {
5028 free(ofm);
5029 }
5030
5031 if (error) {
5032 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5033 ofperr_to_string(error));
5034 }
5035 } else {
5036 xlate_report(ctx, OFT_WARN,
5037 "suppressing side effects, so learn action ignored");
5038 }
5039 }
5040
5041 static void
5042 xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5043 uint16_t idle_timeout, uint16_t hard_timeout)
5044 {
5045 if (tcp_flags & (TCP_FIN | TCP_RST)) {
5046 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
5047 }
5048 }
5049
5050 static void
5051 xlate_fin_timeout(struct xlate_ctx *ctx,
5052 const struct ofpact_fin_timeout *oft)
5053 {
5054 if (ctx->rule) {
5055 if (ctx->xin->allow_side_effects) {
5056 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5057 oft->fin_idle_timeout, oft->fin_hard_timeout);
5058 }
5059 if (ctx->xin->xcache) {
5060 struct xc_entry *entry;
5061
5062 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
5063 /* XC_RULE already holds a reference on the rule, none is taken
5064 * here. */
5065 entry->fin.rule = ctx->rule;
5066 entry->fin.idle = oft->fin_idle_timeout;
5067 entry->fin.hard = oft->fin_hard_timeout;
5068 }
5069 }
5070 }
5071
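/* Implements the OpenFlow "sample" action by composing a datapath sample()
 * action carrying an IPFIX flow-sample cookie, optionally preceded by a
 * set(tunnel(...)) action when sampling an egress tunnel port. */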
5072 static void
5073 xlate_sample_action(struct xlate_ctx *ctx,
5074 const struct ofpact_sample *os)
5075 {
5076 odp_port_t output_odp_port = ODPP_NONE;
5077 odp_port_t tunnel_out_port = ODPP_NONE;
5078 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5079 bool emit_set_tunnel = false;
5080
5081 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5082 return;
5083 }
5084
5085 /* Scale the probability from 16-bit to 32-bit while representing
5086 * the same percentage. */
5087 uint32_t probability = (os->probability << 16) | os->probability;
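/* Illustrative arithmetic (not from the original source): UINT32_MAX equals
 * UINT16_MAX * 0x10001 and (p << 16) | p equals p * 0x10001, so e.g.
 * p = 0x8000 becomes 0x80008000 and the sampled fraction p / UINT16_MAX is
 * preserved exactly. */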
5088
5089 /* If the ofp_port in the flow sample action equals the input ofp_port,
5090 * this sample action is an input port action. */
5091 if (os->sampling_port != OFPP_NONE &&
5092 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5093 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5094 os->sampling_port);
5095 if (output_odp_port == ODPP_NONE) {
5096 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5097 "action", os->sampling_port);
5098 return;
5099 }
5100
5101 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5102 os->collector_set_id)
5103 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
5104 tunnel_out_port = output_odp_port;
5105 emit_set_tunnel = true;
5106 }
5107 }
5108
5109 xlate_commit_actions(ctx);
5110 /* If 'emit_set_tunnel' is set, sample(sampling_port=1) translates into
5111 * the datapath actions set(tunnel(...)),sample(...), which are used for
5112 * sampling egress tunnel information. */
5113 if (emit_set_tunnel) {
5114 const struct xport *xport = get_ofp_port(ctx->xbridge,
5115 os->sampling_port);
5116
5117 if (xport && xport->is_tunnel) {
5118 struct flow *flow = &ctx->xin->flow;
5119 tnl_port_send(xport->ofport, flow, ctx->wc);
5120 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5121 struct flow_tnl flow_tnl = flow->tunnel;
5122
5123 commit_odp_tunnel_action(flow, &ctx->base_flow,
5124 ctx->odp_actions);
5125 flow->tunnel = flow_tnl;
5126 }
5127 } else {
5128 xlate_report_error(ctx,
5129 "sampling_port:%d should be a tunnel port.",
5130 os->sampling_port);
5131 }
5132 }
5133
5134 struct user_action_cookie cookie = {
5135 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
5136 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
5137 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5138 .flow_sample = {
5139 .probability = os->probability,
5140 .collector_set_id = os->collector_set_id,
5141 .obs_domain_id = os->obs_domain_id,
5142 .obs_point_id = os->obs_point_id,
5143 .output_odp_port = output_odp_port,
5144 .direction = os->direction,
5145 }
5146 };
5147 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
5148 }
5149
5150 /* Determine if a datapath action translated from an OpenFlow action
5151 * can be reversed by another datapath action.
5152 *
5153 * OpenFlow actions that do not emit datapath actions are trivially
5154 * reversible. Reversibility of other actions depends on the nature of
5155 * the action and its translation. */
5156 static bool
5157 reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
5158 {
5159 const struct ofpact *a;
5160
5161 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5162 switch (a->type) {
5163 case OFPACT_BUNDLE:
5164 case OFPACT_CLEAR_ACTIONS:
5165 case OFPACT_CLONE:
5166 case OFPACT_CONJUNCTION:
5167 case OFPACT_CONTROLLER:
5168 case OFPACT_CT_CLEAR:
5169 case OFPACT_DEBUG_RECIRC:
5170 case OFPACT_DEBUG_SLOW:
5171 case OFPACT_DEC_MPLS_TTL:
5172 case OFPACT_DEC_TTL:
5173 case OFPACT_ENQUEUE:
5174 case OFPACT_EXIT:
5175 case OFPACT_FIN_TIMEOUT:
5176 case OFPACT_GOTO_TABLE:
5177 case OFPACT_GROUP:
5178 case OFPACT_LEARN:
5179 case OFPACT_MULTIPATH:
5180 case OFPACT_NOTE:
5181 case OFPACT_OUTPUT:
5182 case OFPACT_OUTPUT_REG:
5183 case OFPACT_POP_MPLS:
5184 case OFPACT_POP_QUEUE:
5185 case OFPACT_PUSH_MPLS:
5186 case OFPACT_PUSH_VLAN:
5187 case OFPACT_REG_MOVE:
5188 case OFPACT_RESUBMIT:
5189 case OFPACT_SAMPLE:
5190 case OFPACT_SET_ETH_DST:
5191 case OFPACT_SET_ETH_SRC:
5192 case OFPACT_SET_FIELD:
5193 case OFPACT_SET_IP_DSCP:
5194 case OFPACT_SET_IP_ECN:
5195 case OFPACT_SET_IP_TTL:
5196 case OFPACT_SET_IPV4_DST:
5197 case OFPACT_SET_IPV4_SRC:
5198 case OFPACT_SET_L4_DST_PORT:
5199 case OFPACT_SET_L4_SRC_PORT:
5200 case OFPACT_SET_MPLS_LABEL:
5201 case OFPACT_SET_MPLS_TC:
5202 case OFPACT_SET_MPLS_TTL:
5203 case OFPACT_SET_QUEUE:
5204 case OFPACT_SET_TUNNEL:
5205 case OFPACT_SET_VLAN_PCP:
5206 case OFPACT_SET_VLAN_VID:
5207 case OFPACT_STACK_POP:
5208 case OFPACT_STACK_PUSH:
5209 case OFPACT_STRIP_VLAN:
5210 case OFPACT_UNROLL_XLATE:
5211 case OFPACT_WRITE_ACTIONS:
5212 case OFPACT_WRITE_METADATA:
5213 break;
5214
5215 case OFPACT_CT:
5216 case OFPACT_METER:
5217 case OFPACT_NAT:
5218 case OFPACT_OUTPUT_TRUNC:
5219 case OFPACT_ENCAP:
5220 case OFPACT_DECAP:
5221 case OFPACT_DEC_NSH_TTL:
5222 return false;
5223 }
5224 }
5225 return true;
5226 }
5227
5228 static void
5229 clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
5230 struct xlate_ctx *ctx, bool is_last_action)
5231 {
5232 struct ofpbuf old_stack = ctx->stack;
5233 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5234 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5235 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5236
5237 struct ofpbuf old_action_set = ctx->action_set;
5238 uint64_t actset_stub[1024 / 8];
5239 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5240 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5241
5242 size_t offset, ac_offset;
5243 struct flow old_flow = ctx->xin->flow;
5244
5245 if (reversible_actions(actions, actions_len) || is_last_action) {
5246 old_flow = ctx->xin->flow;
5247 do_xlate_actions(actions, actions_len, ctx, is_last_action);
5248 if (ctx->freezing) {
5249 finish_freezing(ctx);
5250 }
5251 goto xlate_done;
5252 }
5253
5254 /* Commit datapath actions before emitting the clone action to
5255 * avoid emitting those actions twice. Once inside
5256 * the clone, another time for the action after clone. */
5257 xlate_commit_actions(ctx);
5258 struct flow old_base = ctx->base_flow;
5259 bool old_was_mpls = ctx->was_mpls;
5260 bool old_conntracked = ctx->conntracked;
5261
5262 /* The actions are not reversible, so a datapath clone action is
5263 * required to encode the translation. Select the clone action
5264 * based on datapath capabilities. */
5265 if (ctx->xbridge->support.clone) {
5266 /* Use the datapath clone action directly. */
5267 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
5268 do_xlate_actions(actions, actions_len, ctx, true);
5269 if (ctx->freezing) {
5270 finish_freezing(ctx);
5271 }
5272 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5273 goto dp_clone_done;
5274 }
5275
5276 if (ctx->xbridge->support.sample_nesting > 3) {
5277 /* Use sample action as datapath clone. */
5278 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5279 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5280 OVS_SAMPLE_ATTR_ACTIONS);
5281 do_xlate_actions(actions, actions_len, ctx, true);
5282 if (ctx->freezing) {
5283 finish_freezing(ctx);
5284 }
5285 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5286 nl_msg_cancel_nested(ctx->odp_actions, offset);
5287 } else {
5288 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5289 UINT32_MAX); /* 100% probability. */
5290 nl_msg_end_nested(ctx->odp_actions, offset);
5291 }
5292 goto dp_clone_done;
5293 }
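/* Illustrative shape of the fallback above (assumed example, not from the
 * original source): nested actions A1,A2 are emitted as
 *   sample(sample=100.0%,actions(A1,A2))
 * i.e. OVS_SAMPLE_ATTR_PROBABILITY = UINT32_MAX, approximating clone()
 * semantics on datapaths without the clone action; if the nested action list
 * turns out empty, the whole sample action is cancelled. */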
5294
5295 /* The datapath supports neither clone nor a deep enough sample nesting,
5296 * so skip translating the nested actions and report an error. */
5297 xlate_report_error(ctx, "Failed to compose clone action");
5298
5299 dp_clone_done:
5300 /* The clone's conntrack execution should have no effect on the original
5301 * packet. */
5302 ctx->conntracked = old_conntracked;
5303
5304 /* Popping MPLS from the clone should have no effect on the original
5305 * packet. */
5306 ctx->was_mpls = old_was_mpls;
5307
5308 /* Restore the 'base_flow' for the next action. */
5309 ctx->base_flow = old_base;
5310
5311 xlate_done:
5312 ofpbuf_uninit(&ctx->action_set);
5313 ctx->action_set = old_action_set;
5314 ofpbuf_uninit(&ctx->stack);
5315 ctx->stack = old_stack;
5316 ctx->xin->flow = old_flow;
5317 }
5318
5319 static void
5320 compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5321 bool is_last_action)
5322 {
5323 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5324
5325 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action);
5326 }
5327
5328 static void
5329 xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5330 {
5331 if (meter->provider_meter_id != UINT32_MAX) {
5332 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5333 meter->provider_meter_id);
5334 }
5335 }
5336
5337 static bool
5338 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
5339 {
5340 if (xport->config & (is_stp(&ctx->xin->flow)
5341 ? OFPUTIL_PC_NO_RECV_STP
5342 : OFPUTIL_PC_NO_RECV)) {
5343 return false;
5344 }
5345
5346 /* Only drop packets here if both forwarding and learning are
5347 * disabled. If just learning is enabled, we need to let
5348 * OFPP_NORMAL and the learning action have a look at the packet
5349 * before we can drop it. */
5350 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5351 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
5352 return false;
5353 }
5354
5355 return true;
5356 }
5357
5358 static void
5359 xlate_write_actions__(struct xlate_ctx *ctx,
5360 const struct ofpact *ofpacts, size_t ofpacts_len)
5361 {
5362 /* Maintain actset_output depending on the contents of the action set:
5363 *
5364 * - OFPP_UNSET, if there is no "output" action.
5365 *
5366 * - The output port, if there is an "output" action and no "group"
5367 * action.
5368 *
5369 * - OFPP_UNSET, if there is a "group" action.
5370 */
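/* Illustrative example (not from the original source):
 *   write_actions(output:3) -> actset_output = 3
 *   write_actions(group:5) -> actset_output = OFPP_UNSET
 * and once a group has been written, 'action_set_has_group' keeps
 * actset_output at OFPP_UNSET for later writes. */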
5371 if (!ctx->action_set_has_group) {
5372 const struct ofpact *a;
5373 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5374 if (a->type == OFPACT_OUTPUT) {
5375 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5376 } else if (a->type == OFPACT_GROUP) {
5377 ctx->xin->flow.actset_output = OFPP_UNSET;
5378 ctx->action_set_has_group = true;
5379 break;
5380 }
5381 }
5382 }
5383
5384 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5385 }
5386
5387 static void
5388 xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5389 {
5390 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
5391 }
5392
5393 static void
5394 xlate_action_set(struct xlate_ctx *ctx)
5395 {
5396 uint64_t action_list_stub[1024 / 8];
5397 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
5398 ofpacts_execute_action_set(&action_list, &ctx->action_set);
5399 /* Clear the action set, as it is not needed any more. */
5400 ofpbuf_clear(&ctx->action_set);
5401 if (action_list.size) {
5402 ctx->in_action_set = true;
5403
5404 struct ovs_list *old_trace = ctx->xin->trace;
5405 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5406 "--. Executing action set:");
5407 do_xlate_actions(action_list.data, action_list.size, ctx, true);
5408 ctx->xin->trace = old_trace;
5409
5410 ctx->in_action_set = false;
5411 }
5412 ofpbuf_uninit(&action_list);
5413 }
5414
5415 static void
5416 freeze_put_unroll_xlate(struct xlate_ctx *ctx)
5417 {
5418 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
5419
5420 /* Restore the table_id and rule cookie for a potential PACKET_IN,
5421 * if needed. */
5422 if (!unroll ||
5423 (ctx->table_id != unroll->rule_table_id
5424 || ctx->rule_cookie != unroll->rule_cookie)) {
5425 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
5426 unroll->rule_table_id = ctx->table_id;
5427 unroll->rule_cookie = ctx->rule_cookie;
5428 ctx->frozen_actions.header = unroll;
5429 }
5430 }
5431
5432
5433 /* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5434 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5435 * present, before any action that may depend on the current table ID or flow
5436 * cookie. */
5437 static void
5438 freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
5439 struct xlate_ctx *ctx)
5440 {
5441 for (; a < end; a = ofpact_next(a)) {
5442 switch (a->type) {
5443 case OFPACT_OUTPUT_REG:
5444 case OFPACT_OUTPUT_TRUNC:
5445 case OFPACT_GROUP:
5446 case OFPACT_OUTPUT:
5447 case OFPACT_CONTROLLER:
5448 case OFPACT_DEC_MPLS_TTL:
5449 case OFPACT_DEC_NSH_TTL:
5450 case OFPACT_DEC_TTL:
5451 /* These actions may generate asynchronous messages, which include
5452 * table ID and flow cookie information. */
5453 freeze_put_unroll_xlate(ctx);
5454 break;
5455
5456 case OFPACT_RESUBMIT:
5457 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5458 /* This resubmit action is relative to the current table, so we
5459 * need to track what table that is. */
5460 freeze_put_unroll_xlate(ctx);
5461 }
5462 break;
5463
5464 case OFPACT_SET_TUNNEL:
5465 case OFPACT_REG_MOVE:
5466 case OFPACT_SET_FIELD:
5467 case OFPACT_STACK_PUSH:
5468 case OFPACT_STACK_POP:
5469 case OFPACT_LEARN:
5470 case OFPACT_WRITE_METADATA:
5471 case OFPACT_GOTO_TABLE:
5472 case OFPACT_ENQUEUE:
5473 case OFPACT_SET_VLAN_VID:
5474 case OFPACT_SET_VLAN_PCP:
5475 case OFPACT_STRIP_VLAN:
5476 case OFPACT_PUSH_VLAN:
5477 case OFPACT_SET_ETH_SRC:
5478 case OFPACT_SET_ETH_DST:
5479 case OFPACT_SET_IPV4_SRC:
5480 case OFPACT_SET_IPV4_DST:
5481 case OFPACT_SET_IP_DSCP:
5482 case OFPACT_SET_IP_ECN:
5483 case OFPACT_SET_IP_TTL:
5484 case OFPACT_SET_L4_SRC_PORT:
5485 case OFPACT_SET_L4_DST_PORT:
5486 case OFPACT_SET_QUEUE:
5487 case OFPACT_POP_QUEUE:
5488 case OFPACT_PUSH_MPLS:
5489 case OFPACT_POP_MPLS:
5490 case OFPACT_SET_MPLS_LABEL:
5491 case OFPACT_SET_MPLS_TC:
5492 case OFPACT_SET_MPLS_TTL:
5493 case OFPACT_MULTIPATH:
5494 case OFPACT_BUNDLE:
5495 case OFPACT_EXIT:
5496 case OFPACT_UNROLL_XLATE:
5497 case OFPACT_FIN_TIMEOUT:
5498 case OFPACT_CLEAR_ACTIONS:
5499 case OFPACT_WRITE_ACTIONS:
5500 case OFPACT_METER:
5501 case OFPACT_SAMPLE:
5502 case OFPACT_CLONE:
5503 case OFPACT_ENCAP:
5504 case OFPACT_DECAP:
5505 case OFPACT_DEBUG_RECIRC:
5506 case OFPACT_DEBUG_SLOW:
5507 case OFPACT_CT:
5508 case OFPACT_CT_CLEAR:
5509 case OFPACT_NAT:
5510 /* These cannot generate PACKET_INs. */
5511 break;
5512
5513 case OFPACT_NOTE:
5514 case OFPACT_CONJUNCTION:
5515 /* These need not be copied for restoration. */
5516 continue;
5517 }
5518 /* Copy the action over. */
5519 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
5520 }
5521 }
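
/* Illustrative example (not from the original source): if translation freezes
 * while the actions "load:5->NXM_NX_REG0[], controller" remain, the load is
 * copied to ctx->frozen_actions as-is, and an unroll_xlate(table, cookie)
 * entry is inserted before the controller action so that a PACKET_IN sent
 * after thawing still reports the original table ID and flow cookie. */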
5522
5523 static void
5524 put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5525 struct flow_wildcards *wc)
5526 {
5527 if (wc->masks.ct_mark) {
5528 struct {
5529 uint32_t key;
5530 uint32_t mask;
5531 } *odp_ct_mark;
5532
5533 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5534 sizeof(*odp_ct_mark));
5535 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5536 odp_ct_mark->mask = wc->masks.ct_mark;
5537 }
5538 }
5539
5540 static void
5541 put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5542 struct flow_wildcards *wc)
5543 {
5544 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
5545 struct {
5546 ovs_u128 key;
5547 ovs_u128 mask;
5548 } odp_ct_label;
5549
5550 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
5551 odp_ct_label.mask = wc->masks.ct_label;
5552 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
5553 &odp_ct_label, sizeof odp_ct_label);
5554 }
5555 }
5556
5557 static void
5558 put_ct_helper(struct xlate_ctx *ctx,
5559 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
5560 {
5561 if (ofc->alg) {
5562 switch(ofc->alg) {
5563 case IPPORT_FTP:
5564 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
5565 break;
5566 case IPPORT_TFTP:
5567 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5568 break;
5569 default:
5570 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
5571 break;
5572 }
5573 }
5574 }
5575
5576 static void
5577 put_ct_nat(struct xlate_ctx *ctx)
5578 {
5579 struct ofpact_nat *ofn = ctx->ct_nat_action;
5580 size_t nat_offset;
5581
5582 if (!ofn) {
5583 return;
5584 }
5585
5586 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5587 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5588 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5589 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5590 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5591 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5592 }
5593 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5594 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5595 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5596 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5597 }
5598 if (ofn->range_af == AF_INET) {
5599 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5600 ofn->range.addr.ipv4.min);
5601 if (ofn->range.addr.ipv4.max &&
5602 (ntohl(ofn->range.addr.ipv4.max)
5603 > ntohl(ofn->range.addr.ipv4.min))) {
5604 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5605 ofn->range.addr.ipv4.max);
5606 }
5607 } else if (ofn->range_af == AF_INET6) {
5608 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5609 &ofn->range.addr.ipv6.min,
5610 sizeof ofn->range.addr.ipv6.min);
5611 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
5612 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
5613 sizeof ofn->range.addr.ipv6.max) > 0) {
5614 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5615 &ofn->range.addr.ipv6.max,
5616 sizeof ofn->range.addr.ipv6.max);
5617 }
5618 }
5619 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
5620 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
5621 ofn->range.proto.min);
5622 if (ofn->range.proto.max &&
5623 ofn->range.proto.max > ofn->range.proto.min) {
5624 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
5625 ofn->range.proto.max);
5626 }
5627 }
5628 }
5629 nl_msg_end_nested(ctx->odp_actions, nat_offset);
5630 }
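
/* Illustrative encoding (assumed example, not from the original source): an
 * OpenFlow ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535)) becomes an
 * OVS_CT_ATTR_NAT nest carrying OVS_NAT_ATTR_SRC plus IP_MIN/IP_MAX and
 * PROTO_MIN/PROTO_MAX; the *_MAX attributes are emitted only when the upper
 * bound is actually greater than the lower bound. */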
5631
5632 static void
5633 compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
5634 bool is_last_action)
5635 {
5636 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
5637 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
5638 size_t ct_offset;
5639 uint16_t zone;
5640
5641 /* Ensure that any prior actions are applied before composing the new
5642 * conntrack action. */
5643 xlate_commit_actions(ctx);
5644
5645 /* Process nested actions first, to populate the key. */
5646 ctx->ct_nat_action = NULL;
5647 ctx->wc->masks.ct_mark = 0;
5648 ctx->wc->masks.ct_label = OVS_U128_ZERO;
5649 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
5650 is_last_action);
5651
5652 if (ofc->zone_src.field) {
5653 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
5654 } else {
5655 zone = ofc->zone_imm;
5656 }
5657
5658 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
5659 if (ofc->flags & NX_CT_F_COMMIT) {
5660 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
5661 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
5662 if (ctx->xbridge->support.ct_eventmask) {
5663 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
5664 OVS_CT_EVENTMASK_DEFAULT);
5665 }
5666 }
5667 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
5668 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5669 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5670 put_ct_helper(ctx, ctx->odp_actions, ofc);
5671 put_ct_nat(ctx);
5672 ctx->ct_nat_action = NULL;
5673 nl_msg_end_nested(ctx->odp_actions, ct_offset);
5674
5675 ctx->wc->masks.ct_mark = old_ct_mark_mask;
5676 ctx->wc->masks.ct_label = old_ct_label_mask;
5677
5678 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
5679 ctx->conntracked = true;
5680 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
5681 }
5682
5683 /* The ct_* fields are only available in the scope of the 'recirc_table'
5684 * call chain. */
5685 flow_clear_conntrack(&ctx->xin->flow);
5686 ctx->conntracked = false;
5687 }
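
/* Illustrative translation (assumed example, not from the original source):
 * ct(commit,zone=5,exec(set_field:0x1->ct_mark)) becomes roughly the datapath
 * action ct(commit,zone=5,mark=0x1/0xffffffff): the nested actions run first
 * so put_ct_mark() sees the value and mask, and if a recirc table is given
 * (ct(table=N,...)), translation forks into table N where the ct_* fields
 * remain visible. */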
5688
5689 static void
5690 compose_ct_clear_action(struct xlate_ctx *ctx)
5691 {
5692 clear_conntrack(ctx);
5693 /* This action originally existed without dpif support. So to preserve
5694 * compatibility, only append it if the dpif supports it. */
5695 if (ctx->xbridge->support.ct_clear) {
5696 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
5697 }
5698 }
5699
5700 static void
5701 rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
5702 struct flow *flow,
5703 struct flow_wildcards *wc)
5704 {
5705 wc->masks.packet_type = OVS_BE32_MAX;
5706 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
5707 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
5708 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
5709 flow->packet_type = htonl(PT_ETH);
5710 flow->dl_src = eth_addr_zero;
5711 flow->dl_dst = eth_addr_zero;
5712 flow->dl_type = ethertype;
5713 } else {
5714 /* Error handling: drop packet. */
5715 xlate_report_debug(ctx, OFT_ACTION,
5716 "Dropping packet as encap(ethernet) is not "
5717 "supported for packet type ethernet.");
5718 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5719 }
5720 }
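
/* Illustrative example (not from the original source): for an L3 packet with
 * packet_type (OFPHTN_ETHERTYPE, 0x0800), encap(ethernet) sets packet_type to
 * PT_ETH, dl_type to 0x0800 and zeroes dl_src/dl_dst; the push_eth datapath
 * action itself is emitted later, at the next commit. */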
5721
5722 /* For an MD2 NSH header, returns a pointer to an ofpbuf with the encoded
5723 * MD2 TLVs provided as encap properties to the encap operation. The buffer
5724 * will be stored as encap_data in the ctx and copied into the push_nsh
5725 * action at the next commit. */
5726 static struct ofpbuf *
5727 rewrite_flow_push_nsh(struct xlate_ctx *ctx,
5728 const struct ofpact_encap *encap,
5729 struct flow *flow,
5730 struct flow_wildcards *wc)
5731 {
5732 ovs_be32 packet_type = flow->packet_type;
5733 const char *ptr = (char *) encap->props;
5734 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
5735 uint8_t md_type = NSH_M_TYPE1;
5736 uint8_t np = 0;
5737 int i;
5738
5739 /* Scan the optional NSH encap TLV properties, if any. */
5740 for (i = 0; i < encap->n_props; i++) {
5741 struct ofpact_ed_prop *prop_ptr =
5742 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
5743 if (prop_ptr->prop_class == OFPPPC_NSH) {
5744 switch (prop_ptr->type) {
5745 case OFPPPT_PROP_NSH_MDTYPE: {
5746 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
5747 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
5748 prop_ptr);
5749 md_type = prop_md_type->md_type;
5750 break;
5751 }
5752 case OFPPPT_PROP_NSH_TLV: {
5753 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
5754 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
5755 prop_ptr);
5756 struct nsh_md2_tlv *md2_ctx =
5757 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
5758 md2_ctx->md_class = tlv_prop->tlv_class;
5759 md2_ctx->type = tlv_prop->tlv_type;
5760 md2_ctx->length = tlv_prop->tlv_len;
5761 size_t len = ROUND_UP(md2_ctx->length, 4);
5762 size_t padding = len - md2_ctx->length;
5763 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
5764 ofpbuf_put_zeros(buf, padding);
5765 break;
5766 }
5767 default:
5768 /* No other NSH encap properties defined yet. */
5769 break;
5770 }
5771 }
5772 ptr += ROUND_UP(prop_ptr->len, 8);
5773 }
5774 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
5775 ofpbuf_delete(buf);
5776 buf = NULL;
5777 }
5778
5779 /* Determine the Next Protocol field for NSH header. */
5780 switch (ntohl(packet_type)) {
5781 case PT_ETH:
5782 np = NSH_P_ETHERNET;
5783 break;
5784 case PT_IPV4:
5785 np = NSH_P_IPV4;
5786 break;
5787 case PT_IPV6:
5788 np = NSH_P_IPV6;
5789 break;
5790 case PT_NSH:
5791 np = NSH_P_NSH;
5792 break;
5793 default:
5794 /* Error handling: drop packet. */
5795 xlate_report_debug(ctx, OFT_ACTION,
5796 "Dropping packet as encap(nsh) is not "
5797 "supported for packet type (%d,0x%x)",
5798 pt_ns(packet_type), pt_ns_type(packet_type));
5799 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5800 return buf;
5801 }
5802 /* Note that we have matched on packet_type! */
5803 wc->masks.packet_type = OVS_BE32_MAX;
5804
5805 /* Reset all current flow packet headers. */
5806 memset(&flow->dl_dst, 0,
5807 sizeof(struct flow) - offsetof(struct flow, dl_dst));
5808
5809 /* Populate the flow with the new NSH header. */
5810 flow->packet_type = htonl(PT_NSH);
5811 flow->dl_type = htons(ETH_TYPE_NSH);
5812 flow->nsh.flags = 0;
5813 flow->nsh.ttl = 63;
5814 flow->nsh.np = np;
5815 flow->nsh.path_hdr = htonl(255);
5816
5817 if (md_type == NSH_M_TYPE1) {
5818 flow->nsh.mdtype = NSH_M_TYPE1;
5819 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
5820 if (buf) {
5821 /* Drop any MD2 context TLVs. */
5822 ofpbuf_delete(buf);
5823 buf = NULL;
5824 }
5825 } else if (md_type == NSH_M_TYPE2) {
5826 flow->nsh.mdtype = NSH_M_TYPE2;
5827 }
5828 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
5829
5830 return buf;
5831 }
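
/* Illustrative padding arithmetic (not from the original source): an MD2 TLV
 * with 6 bytes of data is stored with 2 bytes of zero padding, since
 * ROUND_UP(6, 4) == 8, so the next TLV in the buffer starts on a 4-byte
 * boundary. */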
5832
5833 static void
5834 xlate_generic_encap_action(struct xlate_ctx *ctx,
5835 const struct ofpact_encap *encap)
5836 {
5837 struct flow *flow = &ctx->xin->flow;
5838 struct flow_wildcards *wc = ctx->wc;
5839 struct ofpbuf *encap_data = NULL;
5840
5841 /* Ensure that any pending actions on the inner packet are applied before
5842 * rewriting the flow. */
5843 xlate_commit_actions(ctx);
5844
5845 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
5846 switch (ntohl(encap->new_pkt_type)) {
5847 case PT_ETH:
5848 rewrite_flow_encap_ethernet(ctx, flow, wc);
5849 break;
5850 case PT_NSH:
5851 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
5852 break;
5853 default:
5854 /* New packet type was checked during decoding. */
5855 OVS_NOT_REACHED();
5856 }
5857
5858 if (!ctx->error) {
5859 /* The actual encap datapath action will be generated at next commit. */
5860 ctx->pending_encap = true;
5861 ctx->encap_data = encap_data;
5862 }
5863 }
5864
5865 /* Returns true if packet must be recirculated after decapsulation. */
5866 static bool
5867 xlate_generic_decap_action(struct xlate_ctx *ctx,
5868 const struct ofpact_decap *decap OVS_UNUSED)
5869 {
5870 struct flow *flow = &ctx->xin->flow;
5871
5872 /* Ensure that any pending actions on the current packet are applied
5873 * before generating the decap action. */
5874 xlate_commit_actions(ctx);
5875
5876 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
5877 switch (ntohl(flow->packet_type)) {
5878 case PT_ETH:
5879 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
5880 /* Error handling: drop packet. */
5881 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
5882 "decap Ethernet if VLAN is present.");
5883 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5884 } else {
5885 /* Just change the packet_type.
5886 * Delay generating pop_eth to the next commit. */
5887 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
5888 ntohs(flow->dl_type)));
5889 ctx->wc->masks.dl_type = OVS_BE16_MAX;
5890 }
5891 return false;
5892 case PT_NSH:
5893 /* The pop_nsh action is generated at the commit executed as
5894 * part of freezing the ctx for recirculation. Here we just set
5895 * the new packet type based on the NSH next protocol field. */
5896 switch (flow->nsh.np) {
5897 case NSH_P_ETHERNET:
5898 flow->packet_type = htonl(PT_ETH);
5899 break;
5900 case NSH_P_IPV4:
5901 flow->packet_type = htonl(PT_IPV4);
5902 break;
5903 case NSH_P_IPV6:
5904 flow->packet_type = htonl(PT_IPV6);
5905 break;
5906 case NSH_P_NSH:
5907 flow->packet_type = htonl(PT_NSH);
5908 break;
5909 default:
5910 /* Error handling: drop packet. */
5911 xlate_report_debug(ctx, OFT_ACTION,
5912 "Dropping packet as NSH next protocol %d "
5913 "is not supported", flow->nsh.np);
5914 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5915 return false;
5916 break;
5917 }
5918 ctx->wc->masks.nsh.np = UINT8_MAX;
5919 /* Trigger recirculation. */
5920 return true;
5921 default:
5922 /* Error handling: drop packet. */
5923 xlate_report_debug(
5924 ctx, OFT_ACTION,
5925 "Dropping packet as the decap() does not support "
5926 "packet type (%d,0x%x)",
5927 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
5928 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5929 return false;
5930 }
5931 }
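
/* Illustrative example (not from the original source): decap() of an untagged
 * Ethernet frame only rewrites packet_type to (OFPHTN_ETHERTYPE, dl_type) and
 * defers the pop_eth datapath action to the next commit, with no
 * recirculation; decap() of an NSH packet instead derives the new packet_type
 * from nsh.np and returns true so that the caller triggers recirculation to
 * reparse the inner packet. */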
5932
5933 static void
5934 recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
5935 {
5936 /* No need to recirculate if already exiting. */
5937 if (ctx->exit) {
5938 return;
5939 }
5940
5941 /* Do not consider recirculating unless the packet was previously MPLS. */
5942 if (!ctx->was_mpls) {
5943 return;
5944 }
5945
5946 /* Special case these actions, only recirculating if necessary.
5947 * This avoids the overhead of recirculation in common use-cases.
5948 */
5949 switch (a->type) {
5950
5951 /* Output actions do not require recirculation. */
5952 case OFPACT_OUTPUT:
5953 case OFPACT_OUTPUT_TRUNC:
5954 case OFPACT_ENQUEUE:
5955 case OFPACT_OUTPUT_REG:
5956 /* Set actions that don't touch L3+ fields do not require recirculation. */
5957 case OFPACT_SET_VLAN_VID:
5958 case OFPACT_SET_VLAN_PCP:
5959 case OFPACT_SET_ETH_SRC:
5960 case OFPACT_SET_ETH_DST:
5961 case OFPACT_SET_TUNNEL:
5962 case OFPACT_SET_QUEUE:
5963 /* If the actions of a group require recirculation, that can be detected
5964 * when translating them. */
5965 case OFPACT_GROUP:
5966 return;
5967
5968 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
5969 case OFPACT_SET_FIELD:
5970 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
5971 break;
5972 }
5973 return;
5974
5975 /* For simplicity, recirculate in all other cases. */
5976 case OFPACT_CONTROLLER:
5977 case OFPACT_BUNDLE:
5978 case OFPACT_STRIP_VLAN:
5979 case OFPACT_PUSH_VLAN:
5980 case OFPACT_SET_IPV4_SRC:
5981 case OFPACT_SET_IPV4_DST:
5982 case OFPACT_SET_IP_DSCP:
5983 case OFPACT_SET_IP_ECN:
5984 case OFPACT_SET_IP_TTL:
5985 case OFPACT_SET_L4_SRC_PORT:
5986 case OFPACT_SET_L4_DST_PORT:
5987 case OFPACT_REG_MOVE:
5988 case OFPACT_STACK_PUSH:
5989 case OFPACT_STACK_POP:
5990 case OFPACT_DEC_TTL:
5991 case OFPACT_SET_MPLS_LABEL:
5992 case OFPACT_SET_MPLS_TC:
5993 case OFPACT_SET_MPLS_TTL:
5994 case OFPACT_DEC_MPLS_TTL:
5995 case OFPACT_PUSH_MPLS:
5996 case OFPACT_POP_MPLS:
5997 case OFPACT_POP_QUEUE:
5998 case OFPACT_FIN_TIMEOUT:
5999 case OFPACT_RESUBMIT:
6000 case OFPACT_LEARN:
6001 case OFPACT_CONJUNCTION:
6002 case OFPACT_MULTIPATH:
6003 case OFPACT_NOTE:
6004 case OFPACT_EXIT:
6005 case OFPACT_SAMPLE:
6006 case OFPACT_CLONE:
6007 case OFPACT_ENCAP:
6008 case OFPACT_DECAP:
6009 case OFPACT_DEC_NSH_TTL:
6010 case OFPACT_UNROLL_XLATE:
6011 case OFPACT_CT:
6012 case OFPACT_CT_CLEAR:
6013 case OFPACT_NAT:
6014 case OFPACT_DEBUG_RECIRC:
6015 case OFPACT_DEBUG_SLOW:
6016 case OFPACT_METER:
6017 case OFPACT_CLEAR_ACTIONS:
6018 case OFPACT_WRITE_ACTIONS:
6019 case OFPACT_WRITE_METADATA:
6020 case OFPACT_GOTO_TABLE:
6021 default:
6022 break;
6023 }
6024
6025 /* Recirculate */
6026 ctx_trigger_freeze(ctx);
6027 }
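
/* Illustrative example (not from the original source): after pop_mpls:0x0800
 * the flow may expose an IPv4 header that was never parsed while the packet
 * was MPLS, so a following set_field on an L3 field such as ip_dst triggers a
 * freeze/recirculation here, while output:2 or a set_field on eth_dst does
 * not. */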
6028
6029 static void
6030 xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6031 {
6032 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6033 xlate_report_subfield(ctx, &a->dst);
6034 }
6035
6036 static void
6037 xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6038 {
6039 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6040 xlate_report_subfield(ctx, &a->subfield);
6041 } else {
6042 xlate_report_error(ctx, "stack underflow");
6043 }
6044 }
6045
6046 /* Restore translation context data that was stored earlier. */
6047 static void
6048 xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6049 const struct ofpact_unroll_xlate *a)
6050 {
6051 ctx->table_id = a->rule_table_id;
6052 ctx->rule_cookie = a->rule_cookie;
6053 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6054 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6055 }
6056
6057 static void
6058 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6059 struct xlate_ctx *ctx, bool is_last_action)
6060 {
6061 struct flow_wildcards *wc = ctx->wc;
6062 struct flow *flow = &ctx->xin->flow;
6063 const struct ofpact *a;
6064
6065 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
6066 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
6067 }
6068 /* dl_type already in the mask, not set below. */
6069
6070 if (!ofpacts_len) {
6071 xlate_report(ctx, OFT_ACTION, "drop");
6072 return;
6073 }
6074
6075 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6076 struct ofpact_controller *controller;
6077 const struct ofpact_metadata *metadata;
6078 const struct ofpact_set_field *set_field;
6079 const struct mf_field *mf;
6080 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6081 && ctx->action_set.size;
6082
6083 if (ctx->error) {
6084 break;
6085 }
6086
6087 recirc_for_mpls(a, ctx);
6088
6089 if (ctx->exit) {
6090 /* Check if we need to store the remaining actions for later
6091 * execution. */
6092 if (ctx->freezing) {
6093 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
6094 ctx);
6095 }
6096 break;
6097 }
6098
6099 if (OVS_UNLIKELY(ctx->xin->trace)) {
6100 struct ds s = DS_EMPTY_INITIALIZER;
6101 ofpacts_format(a, OFPACT_ALIGN(a->len), NULL, &s);
6102 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6103 ds_destroy(&s);
6104 }
6105
6106 switch (a->type) {
6107 case OFPACT_OUTPUT:
6108 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
6109 ofpact_get_OUTPUT(a)->max_len, true, last,
6110 false);
6111 break;
6112
6113 case OFPACT_GROUP:
6114 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
6115 /* Group could not be found. */
6116
6117 /* XXX: Terminates action list translation, but does not
6118 * terminate the pipeline. */
6119 return;
6120 }
6121 break;
6122
6123 case OFPACT_CONTROLLER:
6124 controller = ofpact_get_CONTROLLER(a);
6125 if (controller->pause) {
6126 ctx->pause = controller;
6127 ctx_trigger_freeze(ctx);
6128 a = ofpact_next(a);
6129 } else {
6130 xlate_controller_action(ctx, controller->max_len,
6131 controller->reason,
6132 controller->controller_id,
6133 controller->userdata,
6134 controller->userdata_len);
6135 }
6136 break;
6137
6138 case OFPACT_ENQUEUE:
6139 memset(&wc->masks.skb_priority, 0xff,
6140 sizeof wc->masks.skb_priority);
6141 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last);
6142 break;
6143
6144 case OFPACT_SET_VLAN_VID:
6145 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6146 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6147 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
6148 if (!flow->vlans[0].tpid) {
6149 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6150 }
6151 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6152 flow->vlans[0].tci |=
6153 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6154 htons(VLAN_CFI));
6155 }
6156 break;
6157
6158 case OFPACT_SET_VLAN_PCP:
6159 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6160 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6161 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
6162 if (!flow->vlans[0].tpid) {
6163 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6164 }
6165 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6166 flow->vlans[0].tci |=
6167 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6168 << VLAN_PCP_SHIFT) | VLAN_CFI);
6169 }
6170 break;
6171
6172 case OFPACT_STRIP_VLAN:
6173 flow_pop_vlan(flow, wc);
6174 break;
6175
6176 case OFPACT_PUSH_VLAN:
6177 flow_push_vlan_uninit(flow, wc);
6178 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6179 flow->vlans[0].tci = htons(VLAN_CFI);
6180 break;
6181
6182 case OFPACT_SET_ETH_SRC:
6183 WC_MASK_FIELD(wc, dl_src);
6184 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
6185 break;
6186
6187 case OFPACT_SET_ETH_DST:
6188 WC_MASK_FIELD(wc, dl_dst);
6189 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
6190 break;
6191
6192 case OFPACT_SET_IPV4_SRC:
6193 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6194 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
6195 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6196 }
6197 break;
6198
6199 case OFPACT_SET_IPV4_DST:
6200 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6201 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
6202 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6203 }
6204 break;
6205
6206 case OFPACT_SET_IP_DSCP:
6207 if (is_ip_any(flow)) {
6208 wc->masks.nw_tos |= IP_DSCP_MASK;
6209 flow->nw_tos &= ~IP_DSCP_MASK;
6210 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
6211 }
6212 break;
6213
6214 case OFPACT_SET_IP_ECN:
6215 if (is_ip_any(flow)) {
6216 wc->masks.nw_tos |= IP_ECN_MASK;
6217 flow->nw_tos &= ~IP_ECN_MASK;
6218 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6219 }
6220 break;
6221
6222 case OFPACT_SET_IP_TTL:
6223 if (is_ip_any(flow)) {
6224 wc->masks.nw_ttl = 0xff;
6225 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6226 }
6227 break;
6228
6229 case OFPACT_SET_L4_SRC_PORT:
6230 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6231 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6232 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
6233 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6234 }
6235 break;
6236
6237 case OFPACT_SET_L4_DST_PORT:
6238 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6239 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6240 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
6241 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6242 }
6243 break;
6244
6245 case OFPACT_RESUBMIT:
6246 /* Freezing complicates resubmit. Some action in the flow
6247 * entry found by resubmit might trigger freezing. If that
6248 * happens, then we do not want to execute the resubmit again
6249 * during thawing, so we skip back to the head of the loop
6250 * to avoid that, adding only the actions that follow the resubmit
6251 * to the frozen actions.
6252 */
6253 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6254 continue;
6255
6256 case OFPACT_SET_TUNNEL:
6257 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
6258 break;
6259
6260 case OFPACT_SET_QUEUE:
6261 memset(&wc->masks.skb_priority, 0xff,
6262 sizeof wc->masks.skb_priority);
6263 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6264 break;
6265
6266 case OFPACT_POP_QUEUE:
6267 memset(&wc->masks.skb_priority, 0xff,
6268 sizeof wc->masks.skb_priority);
6269 if (flow->skb_priority != ctx->orig_skb_priority) {
6270 flow->skb_priority = ctx->orig_skb_priority;
6271 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6272 flow->skb_priority);
6273 }
6274 break;
6275
6276 case OFPACT_REG_MOVE:
6277 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
6278 break;
6279
6280 case OFPACT_SET_FIELD:
6281 set_field = ofpact_get_SET_FIELD(a);
6282 mf = set_field->field;
6283
6284 /* Set the field only if the packet actually has it. */
6285 if (mf_are_prereqs_ok(mf, flow, wc)) {
6286 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6287 mf_set_flow_value_masked(mf, set_field->value,
6288 ofpact_set_field_mask(set_field),
6289 flow);
6290 } else {
6291 xlate_report(ctx, OFT_WARN,
6292 "unmet prerequisites for %s, set_field ignored",
6293 mf->name);
6294
6295 }
6296 break;
6297
6298 case OFPACT_STACK_PUSH:
6299 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6300 &ctx->stack);
6301 break;
6302
6303 case OFPACT_STACK_POP:
6304 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
6305 break;
6306
6307 case OFPACT_PUSH_MPLS:
6308 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
6309 break;
6310
6311 case OFPACT_POP_MPLS:
6312 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6313 break;
6314
6315 case OFPACT_SET_MPLS_LABEL:
6316 compose_set_mpls_label_action(
6317 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
6318 break;
6319
6320 case OFPACT_SET_MPLS_TC:
6321 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
6322 break;
6323
6324 case OFPACT_SET_MPLS_TTL:
6325 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
6326 break;
6327
6328 case OFPACT_DEC_MPLS_TTL:
6329 if (compose_dec_mpls_ttl_action(ctx)) {
6330 return;
6331 }
6332 break;
6333
6334 case OFPACT_DEC_NSH_TTL:
6335 if (compose_dec_nsh_ttl_action(ctx)) {
6336 return;
6337 }
6338 break;
6339
6340 case OFPACT_DEC_TTL:
6341 wc->masks.nw_ttl = 0xff;
6342 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
6343 return;
6344 }
6345 break;
6346
6347 case OFPACT_NOTE:
6348 /* Nothing to do. */
6349 break;
6350
6351 case OFPACT_MULTIPATH:
6352 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
6353 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
6354 break;
6355
6356 case OFPACT_BUNDLE:
6357 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last);
6358 break;
6359
6360 case OFPACT_OUTPUT_REG:
6361 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last);
6362 break;
6363
6364 case OFPACT_OUTPUT_TRUNC:
6365 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
6366 ofpact_get_OUTPUT_TRUNC(a)->max_len, last);
6367 break;
6368
6369 case OFPACT_LEARN:
6370 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6371 break;
6372
6373 case OFPACT_CONJUNCTION:
6374 /* A flow with a "conjunction" action represents part of a special
6375 * kind of "set membership match". Such a flow should not actually
6376 * get executed, but it could via, say, a "packet-out", even though
6377 * that wouldn't be useful. Log it to help debugging. */
6378 xlate_report_error(ctx, "executing no-op conjunction action");
6379 break;
6380
6381 case OFPACT_EXIT:
6382 ctx->exit = true;
6383 break;
6384
6385 case OFPACT_UNROLL_XLATE:
6386 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
6387 break;
6388
6389 case OFPACT_FIN_TIMEOUT:
6390 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6391 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6392 break;
6393
6394 case OFPACT_CLEAR_ACTIONS:
6395 xlate_report_action_set(ctx, "was");
6396 ofpbuf_clear(&ctx->action_set);
6397 ctx->xin->flow.actset_output = OFPP_UNSET;
6398 ctx->action_set_has_group = false;
6399 break;
6400
6401 case OFPACT_WRITE_ACTIONS:
6402 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
6403 xlate_report_action_set(ctx, "is");
6404 break;
6405
6406 case OFPACT_WRITE_METADATA:
6407 metadata = ofpact_get_WRITE_METADATA(a);
6408 flow->metadata &= ~metadata->mask;
6409 flow->metadata |= metadata->metadata & metadata->mask;
6410 break;
6411
6412 case OFPACT_METER:
6413 xlate_meter_action(ctx, ofpact_get_METER(a));
6414 break;
6415
6416 case OFPACT_GOTO_TABLE: {
6417 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
6418
6419 ovs_assert(ctx->table_id < ogt->table_id);
6420
6421 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
6422 ogt->table_id, true, true, false, last,
6423 do_xlate_actions);
6424 break;
6425 }
6426
6427 case OFPACT_SAMPLE:
6428 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6429 break;
6430
6431 case OFPACT_CLONE:
6432 compose_clone(ctx, ofpact_get_CLONE(a), last);
6433 break;
6434
6435 case OFPACT_ENCAP:
6436 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6437 break;
6438
6439 case OFPACT_DECAP: {
6440 bool recirc_needed =
6441 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6442 if (!ctx->error && recirc_needed) {
6443 /* Recirculate for parsing of inner packet. */
6444 ctx_trigger_freeze(ctx);
6445 /* Then continue with next action. */
6446 a = ofpact_next(a);
6447 }
6448 break;
6449 }
6450
6451 case OFPACT_CT:
6452 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
6453 break;
6454
6455 case OFPACT_CT_CLEAR:
6456 compose_ct_clear_action(ctx);
6457 break;
6458
6459 case OFPACT_NAT:
6460 /* This will be processed by compose_conntrack_action(). */
6461 ctx->ct_nat_action = ofpact_get_NAT(a);
6462 break;
6463
6464 case OFPACT_DEBUG_RECIRC:
6465 ctx_trigger_freeze(ctx);
6466 a = ofpact_next(a);
6467 break;
6468
6469 case OFPACT_DEBUG_SLOW:
6470 ctx->xout->slow |= SLOW_ACTION;
6471 break;
6472 }
6473
6474 /* Check if we need to store this and the remaining actions for later
6475 * execution. */
6476 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
6477 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
6478 break;
6479 }
6480 }
6481 }
6482
6483 void
6484 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
6485 ovs_version_t version, const struct flow *flow,
6486 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
6487 const struct dp_packet *packet, struct flow_wildcards *wc,
6488 struct ofpbuf *odp_actions)
6489 {
6490 xin->ofproto = ofproto;
6491 xin->tables_version = version;
6492 xin->flow = *flow;
6493 xin->upcall_flow = flow;
6494 xin->flow.in_port.ofp_port = in_port;
6495 xin->flow.actset_output = OFPP_UNSET;
6496 xin->packet = packet;
6497 xin->allow_side_effects = packet != NULL;
6498 xin->rule = rule;
6499 xin->xcache = NULL;
6500 xin->ofpacts = NULL;
6501 xin->ofpacts_len = 0;
6502 xin->tcp_flags = tcp_flags;
6503 xin->trace = NULL;
6504 xin->resubmit_stats = NULL;
6505 xin->depth = 0;
6506 xin->resubmits = 0;
6507 xin->wc = wc;
6508 xin->odp_actions = odp_actions;
6509 xin->in_packet_out = false;
6510 xin->recirc_queue = NULL;
6511
6512 /* Do recirc lookup. */
6513 xin->frozen_state = NULL;
6514 if (flow->recirc_id) {
6515 const struct recirc_id_node *node
6516 = recirc_id_node_find(flow->recirc_id);
6517 if (node) {
6518 xin->frozen_state = &node->state;
6519 }
6520 }
6521 }
6522
6523 void
6524 xlate_out_uninit(struct xlate_out *xout)
6525 {
6526 if (xout) {
6527 recirc_refs_unref(&xout->recircs);
6528 }
6529 }
6530 \f
6531 static struct skb_priority_to_dscp *
6532 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
6533 {
6534 struct skb_priority_to_dscp *pdscp;
6535 uint32_t hash;
6536
6537 hash = hash_int(skb_priority, 0);
6538 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
6539 if (pdscp->skb_priority == skb_priority) {
6540 return pdscp;
6541 }
6542 }
6543 return NULL;
6544 }
6545
6546 static bool
6547 dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
6548 uint8_t *dscp)
6549 {
6550 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
6551 *dscp = pdscp ? pdscp->dscp : 0;
6552 return pdscp != NULL;
6553 }
6554
6555 static size_t
6556 count_skb_priorities(const struct xport *xport)
6557 {
6558 return hmap_count(&xport->skb_priorities);
6559 }
6560
6561 static void
6562 clear_skb_priorities(struct xport *xport)
6563 {
6564 struct skb_priority_to_dscp *pdscp;
6565
6566 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
6567 free(pdscp);
6568 }
6569 }
6570
6571 static bool
6572 actions_output_to_local_port(const struct xlate_ctx *ctx)
6573 {
6574 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
6575 const struct nlattr *a;
6576 unsigned int left;
6577
6578 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
6579 ctx->odp_actions->size) {
6580 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
6581 && nl_attr_get_odp_port(a) == local_odp_port) {
6582 return true;
6583 }
6584 }
6585 return false;
6586 }
6587
6588 #if defined(__linux__)
6589 /* Returns the maximum number of packets that the Linux kernel is willing to
6590 * queue up internally to certain kinds of software-implemented ports, or the
6591 * default (and rarely modified) value if it cannot be determined. */
6592 static int
6593 netdev_max_backlog(void)
6594 {
6595 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6596 static int max_backlog = 1000; /* The normal default value. */
6597
6598 if (ovsthread_once_start(&once)) {
6599 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
6600 FILE *stream;
6601 int n;
6602
6603 stream = fopen(filename, "r");
6604 if (!stream) {
6605 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
6606 } else {
6607 if (fscanf(stream, "%d", &n) != 1) {
6608 VLOG_WARN("%s: read error", filename);
6609 } else if (n <= 100) {
6610 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
6611 } else {
6612 max_backlog = n;
6613 }
6614 fclose(stream);
6615 }
6616 ovsthread_once_done(&once);
6617
6618 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
6619 }
6620
6621 return max_backlog;
6622 }
6623
6624 /* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
6625 * 'odp_actions'. */
6626 static int
6627 count_output_actions(const struct ofpbuf *odp_actions)
6628 {
6629 const struct nlattr *a;
6630 size_t left;
6631 int n = 0;
6632
6633 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
6634 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
6635 n++;
6636 }
6637 }
6638 return n;
6639 }
6640 #endif /* defined(__linux__) */
6641
6642 /* Returns true if 'odp_actions' contains more output actions than the datapath
6643 * can reliably handle in one go. On Linux, this is the value of the
6644 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
6645 * packets that the kernel is willing to queue up for processing while the
6646 * datapath is processing a set of actions. */
6647 static bool
6648 too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
6649 {
6650 #ifdef __linux__
6651 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
6652 && count_output_actions(odp_actions) > netdev_max_backlog());
6653 #else
6654 /* OSes other than Linux might have similar limits, but we don't know how
6655 * to determine them. */
6656 return false;
6657 #endif
6658 }
6659
6660 static void
6661 xlate_wc_init(struct xlate_ctx *ctx)
6662 {
6663 flow_wildcards_init_catchall(ctx->wc);
6664
6665 /* Some fields we consider to always be examined. */
6666 WC_MASK_FIELD(ctx->wc, packet_type);
6667 WC_MASK_FIELD(ctx->wc, in_port);
6668 if (is_ethernet(&ctx->xin->flow, NULL)) {
6669 WC_MASK_FIELD(ctx->wc, dl_type);
6670 }
6671 if (is_ip_any(&ctx->xin->flow)) {
6672 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
6673 }
6674
6675 if (ctx->xbridge->support.odp.recirc) {
6676 /* Always exactly match recirc_id when datapath supports
6677 * recirculation. */
6678 WC_MASK_FIELD(ctx->wc, recirc_id);
6679 }
6680
6681 if (ctx->xbridge->netflow) {
6682 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
6683 }
6684
6685 tnl_wc_init(&ctx->xin->flow, ctx->wc);
6686 }
6687
6688 static void
6689 xlate_wc_finish(struct xlate_ctx *ctx)
6690 {
6691 int i;
6692
6693 /* Clear the metadata and register wildcard masks, because we won't
6694 * use non-header fields as part of the cache. */
6695 flow_wildcards_clear_non_packet_fields(ctx->wc);
6696
6697 /* Wildcard ethernet fields if the original packet type was not
6698 * Ethernet. */
6699 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
6700 ctx->wc->masks.dl_dst = eth_addr_zero;
6701 ctx->wc->masks.dl_src = eth_addr_zero;
6702 ctx->wc->masks.dl_type = 0;
6703 }
6704
6705 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
6706 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
6707 * represent these fields. The datapath interface, on the other hand,
6708 * represents them with just 8 bits each. This means that if the high
6709 * 8 bits of the masks for these fields somehow become set, then they
6710 * will get chopped off by a round trip through the datapath, and
6711 * revalidation will spot that as an inconsistency and delete the flow.
6712 * Avoid the problem here by making sure that only the low 8 bits of
6713 * either field can be unwildcarded for ICMP.
6714 */
6715 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
6716 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
6717 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
6718 }
6719 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
6720 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
6721 if (ctx->wc->masks.vlans[i].tci) {
6722 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
6723 }
6724 }
6725
6726 /* The classifier might return masks that match on tp_src and tp_dst even
6727 * for later fragments. This happens because there might be flows that
6728 * match on tp_src or tp_dst without matching on the frag bits, because
6729 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
6730 * datapath flows and since tp_src and tp_dst are always going to be 0,
6731 * wildcard the fields here. */
6732 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
6733 ctx->wc->masks.tp_src = 0;
6734 ctx->wc->masks.tp_dst = 0;
6735 }
6736 }
6737
6738 /* Translates the flow, actions, or rule in 'xin' into datapath actions in
6739 * 'xout'.
6740 * The caller must take responsibility for eventually freeing 'xout', with
6741 * xlate_out_uninit().
6742 * Returns 'XLATE_OK' if translation was successful. In case of an error an
6743 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
6744 * so that most callers may ignore the return value and transparently install a
6745 * drop flow when the translation fails. */
6746 enum xlate_error
6747 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
6748 {
6749 *xout = (struct xlate_out) {
6750 .slow = 0,
6751 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
6752 };
6753
6754 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
6755 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
6756 if (!xbridge) {
6757 return XLATE_BRIDGE_NOT_FOUND;
6758 }
6759
6760 struct flow *flow = &xin->flow;
6761
6762 uint8_t stack_stub[1024];
6763 uint64_t action_set_stub[1024 / 8];
6764 uint64_t frozen_actions_stub[1024 / 8];
6765 uint64_t actions_stub[256 / 8];
6766 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
6767 struct xlate_ctx ctx = {
6768 .xin = xin,
6769 .xout = xout,
6770 .base_flow = *flow,
6771 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
6772 .xbridge = xbridge,
6773 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
6774 .rule = xin->rule,
6775 .wc = (xin->wc
6776 ? xin->wc
6777 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
6778 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
6779
6780 .depth = xin->depth,
6781 .resubmits = xin->resubmits,
6782 .in_group = false,
6783 .in_action_set = false,
6784 .in_packet_out = xin->in_packet_out,
6785 .pending_encap = false,
6786 .encap_data = NULL,
6787
6788 .table_id = 0,
6789 .rule_cookie = OVS_BE64_MAX,
6790 .orig_skb_priority = flow->skb_priority,
6791 .sflow_n_outputs = 0,
6792 .sflow_odp_port = 0,
6793 .nf_output_iface = NF_OUT_DROP,
6794 .exit = false,
6795 .error = XLATE_OK,
6796 .mirrors = 0,
6797
6798 .freezing = false,
6799 .recirc_update_dp_hash = false,
6800 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
6801 .pause = NULL,
6802
6803 .was_mpls = false,
6804 .conntracked = false,
6805
6806 .ct_nat_action = NULL,
6807
6808 .action_set_has_group = false,
6809 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
6810 };
6811
6812 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
6813 * the packet as the datapath will treat it for output actions. Our
6814 * datapath doesn't retain tunneling information without us re-setting
6815 * it, so clear the tunnel data.
6816 */
6817
6818 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
6819
6820 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
6821 xlate_wc_init(&ctx);
6822
6823 COVERAGE_INC(xlate_actions);
6824
6825 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
6826 xbridge->name);
6827 if (xin->frozen_state) {
6828 const struct frozen_state *state = xin->frozen_state;
6829
6830 struct ovs_list *old_trace = xin->trace;
6831 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
6832
6833 if (xin->ofpacts_len > 0 || ctx.rule) {
6834 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
6835 xin->ofpacts_len ? "actions" : "rule");
6836 ctx.error = XLATE_RECIRCULATION_CONFLICT;
6837 goto exit;
6838 }
6839
6840 /* Set the bridge for post-recirculation processing if needed. */
6841 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
6842 const struct xbridge *new_bridge
6843 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
6844
6845 if (OVS_UNLIKELY(!new_bridge)) {
6846 /* Drop the packet if the bridge cannot be found. */
6847 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
6848 ctx.error = XLATE_BRIDGE_NOT_FOUND;
6849 xin->trace = old_trace;
6850 goto exit;
6851 }
6852 ctx.xbridge = new_bridge;
6853 /* The bridge is now known so obtain its table version. */
6854 ctx.xin->tables_version
6855 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
6856 }
6857
6858 /* Set the thawed table id. Note: A table lookup is done only if there
6859 * are no frozen actions. */
6860 ctx.table_id = state->table_id;
6861 xlate_report(&ctx, OFT_THAW,
6862 "Resuming from table %"PRIu8, ctx.table_id);
6863
6864 ctx.conntracked = state->conntracked;
6865 if (!state->conntracked) {
6866 clear_conntrack(&ctx);
6867 }
6868
6869 /* Restore pipeline metadata. May change flow's in_port and other
6870 * metadata to the values that existed when freezing was triggered. */
6871 frozen_metadata_to_flow(&state->metadata, flow);
6872
6873 /* Restore stack, if any. */
6874 if (state->stack) {
6875 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
6876 }
6877
6878 /* Restore mirror state. */
6879 ctx.mirrors = state->mirrors;
6880
6881 /* Restore action set, if any. */
6882 if (state->action_set_len) {
6883 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
6884 state->action_set, state->action_set_len);
6885
6886 flow->actset_output = OFPP_UNSET;
6887 xlate_write_actions__(&ctx, state->action_set,
6888 state->action_set_len);
6889 }
6890
6891 /* Restore frozen actions. If there are no actions, processing will
6892 * start with a lookup in the table set above. */
6893 xin->ofpacts = state->ofpacts;
6894 xin->ofpacts_len = state->ofpacts_len;
6895 if (state->ofpacts_len) {
6896 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
6897 xin->ofpacts, xin->ofpacts_len);
6898 }
6899
6900 xin->trace = old_trace;
6901 } else if (OVS_UNLIKELY(flow->recirc_id)) {
6902 xlate_report_error(&ctx,
6903 "Recirculation context not found for ID %"PRIx32,
6904 flow->recirc_id);
6905 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
6906 goto exit;
6907 }
6908
6909 /* Tunnel metadata in udpif format must be normalized before translation. */
6910 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
6911 const struct tun_table *tun_tab = ofproto_get_tun_tab(
6912 &ctx.xbridge->ofproto->up);
6913 int err;
6914
6915 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
6916 &xin->upcall_flow->tunnel,
6917 &flow->tunnel);
6918 if (err) {
6919 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
6920 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
6921 goto exit;
6922 }
6923 } else if (!flow->tunnel.metadata.tab) {
6924 /* If the original flow did not come in on a tunnel, then it won't have
6925 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
6926 * table in case we generate tunnel actions. */
6927 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
6928 &ctx.xbridge->ofproto->up);
6929 }
6930 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
6931
6932 /* Get the proximate input port of the packet. (If xin->frozen_state,
6933 * flow->in_port is the ultimate input port of the packet.) */
6934 struct xport *in_port = get_ofp_port(xbridge,
6935 ctx.base_flow.in_port.ofp_port);
6936
6937 if (flow->packet_type != htonl(PT_ETH) && in_port &&
6938 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
6939         /* Add a dummy Ethernet header to the non-L2 packet if it's coming
6940          * from an L3 port, so that all packets are L2 packets for the
6941          * lookup.  The dl_type has already been set from the packet_type. */
6942 flow->packet_type = htonl(PT_ETH);
6943 flow->dl_src = eth_addr_zero;
6944 flow->dl_dst = eth_addr_zero;
6945 ctx.pending_encap = true;
6946 }
6947
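    /* The caller supplied neither explicit actions nor a rule, so look up the
     * rule for this flow in the bridge's classifier, starting from
     * 'ctx.table_id'. */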
6948 if (!xin->ofpacts && !ctx.rule) {
6949 ctx.rule = rule_dpif_lookup_from_table(
6950 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
6951 ctx.xin->resubmit_stats, &ctx.table_id,
6952 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
6953 if (ctx.xin->resubmit_stats) {
6954 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
6955 }
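        /* Record the rule in the translation cache, taking a reference so
         * that it stays alive for later stats accounting. */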
6956 if (ctx.xin->xcache) {
6957 struct xc_entry *entry;
6958
6959 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
6960 entry->rule = ctx.rule;
6961 ofproto_rule_ref(&ctx.rule->up);
6962 }
6963
6964 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
6965 }
6966
6967     /* Update tunnel stats only for packets that have not been thawed. */
6968 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
6969 if (ctx.xin->resubmit_stats) {
6970 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
6971 if (in_port->bfd) {
6972 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
6973 }
6974 }
6975 if (ctx.xin->xcache) {
6976 struct xc_entry *entry;
6977
6978 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
6979 entry->dev.rx = netdev_ref(in_port->netdev);
6980 entry->dev.bfd = bfd_ref(in_port->bfd);
6981 }
6982 }
6983
6984 if (!xin->frozen_state && process_special(&ctx, in_port)) {
6985 /* process_special() did all the processing for this packet.
6986 *
6987 * We do not perform special processing on thawed packets, since that
6988 * was done before they were frozen and should not be redone. */
6989 } else if (in_port && in_port->xbundle
6990 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
6991 xlate_report_error(&ctx, "dropping packet received on port "
6992 "%s, which is reserved exclusively for mirroring",
6993 in_port->xbundle->name);
6994 } else {
6995 /* Sampling is done on initial reception; don't redo after thawing. */
6996 unsigned int user_cookie_offset = 0;
6997 if (!xin->frozen_state) {
6998 user_cookie_offset = compose_sflow_action(&ctx);
6999 compose_ipfix_action(&ctx, ODPP_NONE);
7000 }
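        /* Remember how many bytes of sample actions have been composed so
         * far, so that everything after them can be discarded if forwarding
         * turns out to be disabled on the input port. */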
7001 size_t sample_actions_len = ctx.odp_actions->size;
7002
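        /* Translate the flow's actions only if tunnel ECN processing accepts
         * the packet and the input port is allowed to receive it. */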
7003 if (tnl_process_ecn(flow)
7004 && (!in_port || may_receive(in_port, &ctx))) {
7005 const struct ofpact *ofpacts;
7006 size_t ofpacts_len;
7007
7008 if (xin->ofpacts) {
7009 ofpacts = xin->ofpacts;
7010 ofpacts_len = xin->ofpacts_len;
7011 } else if (ctx.rule) {
7012 const struct rule_actions *actions
7013 = rule_get_actions(&ctx.rule->up);
7014 ofpacts = actions->ofpacts;
7015 ofpacts_len = actions->ofpacts_len;
7016 ctx.rule_cookie = ctx.rule->up.flow_cookie;
7017 } else {
7018 OVS_NOT_REACHED();
7019 }
7020
7021 mirror_ingress_packet(&ctx);
7022 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true);
7023 if (ctx.error) {
7024 goto exit;
7025 }
7026
7027 /* We've let OFPP_NORMAL and the learning action look at the
7028 * packet, so cancel all actions and freezing if forwarding is
7029 * disabled. */
7030 if (in_port && (!xport_stp_forward_state(in_port) ||
7031 !xport_rstp_forward_state(in_port))) {
7032 ctx.odp_actions->size = sample_actions_len;
7033 ctx_cancel_freeze(&ctx);
7034 ofpbuf_clear(&ctx.action_set);
7035 }
7036
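            /* Execute the action set unless translation is freezing.  The
             * action set itself may trigger a freeze, so check again and
             * finish composing the frozen state if needed. */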
7037 if (!ctx.freezing) {
7038 xlate_action_set(&ctx);
7039 }
7040 if (ctx.freezing) {
7041 finish_freezing(&ctx);
7042 }
7043 }
7044
7045 /* Output only fully processed packets. */
7046 if (!ctx.freezing
7047 && xbridge->has_in_band
7048 && in_band_must_output_to_local_port(flow)
7049 && !actions_output_to_local_port(&ctx)) {
7050 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
7051 }
7052
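        /* If an sFlow sample action was composed above, rewrite its cookie
         * now that the final output information is known. */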
7053 if (user_cookie_offset) {
7054 fix_sflow_action(&ctx, user_cookie_offset);
7055 }
7056 }
7057
7058 if (nl_attr_oversized(ctx.odp_actions->size)) {
7059 /* These datapath actions are too big for a Netlink attribute, so we
7060 * can't hand them to the kernel directly. dpif_execute() can execute
7061 * them one by one with help, so just mark the result as SLOW_ACTION to
7062 * prevent the flow from being installed. */
7063 COVERAGE_INC(xlate_actions_oversize);
7064 ctx.xout->slow |= SLOW_ACTION;
7065 } else if (too_many_output_actions(ctx.odp_actions)) {
7066 COVERAGE_INC(xlate_actions_too_many_output);
7067 ctx.xout->slow |= SLOW_ACTION;
7068 }
7069
7070 /* Update NetFlow for non-frozen traffic. */
7071 if (xbridge->netflow && !xin->frozen_state) {
7072 if (ctx.xin->resubmit_stats) {
7073 netflow_flow_update(xbridge->netflow, flow,
7074 ctx.nf_output_iface,
7075 ctx.xin->resubmit_stats);
7076 }
7077 if (ctx.xin->xcache) {
7078 struct xc_entry *entry;
7079
7080 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
7081 entry->nf.netflow = netflow_ref(xbridge->netflow);
7082 entry->nf.flow = xmemdup(flow, sizeof *flow);
7083 entry->nf.iface = ctx.nf_output_iface;
7084 }
7085 }
7086
7087 /* Translate tunnel metadata masks to udpif format if necessary. */
7088 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7089 if (ctx.wc->masks.tunnel.metadata.present.map) {
7090 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7091 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7092 sizeof(struct geneve_opt)];
7093
7094 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7095 &ctx.wc->masks.tunnel,
7096 upcall_tnl->metadata.opts.gnv,
7097 upcall_tnl->metadata.present.len,
7098 opts);
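            /* Keep only the converted Geneve option masks: zero the rest of
             * the structured metadata mask, which does not apply in udpif
             * format, then copy the option masks back in. */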
7099 memset(&ctx.wc->masks.tunnel.metadata, 0,
7100 sizeof ctx.wc->masks.tunnel.metadata);
7101 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7102 upcall_tnl->metadata.present.len);
7103 }
7104 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7105 ctx.wc->masks.tunnel.metadata.tab = NULL;
7106 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7107 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7108 /* If we didn't have options in UDPIF format and didn't have an existing
7109 * metadata table, then it means that there were no options at all when
7110 * we started processing and any wildcards we picked up were from
7111 * action generation. Without options on the incoming packet, wildcards
7112 * aren't meaningful. To avoid them possibly getting misinterpreted,
7113 * just clear everything. */
7114 if (ctx.wc->masks.tunnel.metadata.present.map) {
7115 memset(&ctx.wc->masks.tunnel.metadata, 0,
7116 sizeof ctx.wc->masks.tunnel.metadata);
7117 } else {
7118 ctx.wc->masks.tunnel.metadata.tab = NULL;
7119 }
7120 }
7121
7122 xlate_wc_finish(&ctx);
7123
7124 exit:
7125 /* Reset the table to what it was when we came in. If we only fetched
7126 * it locally, then it has no meaning outside of flow translation. */
7127 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7128
7129 ofpbuf_uninit(&ctx.stack);
7130 ofpbuf_uninit(&ctx.action_set);
7131 ofpbuf_uninit(&ctx.frozen_actions);
7132 ofpbuf_uninit(&scratch_actions);
7133 ofpbuf_delete(ctx.encap_data);
7134
7135 /* Make sure we return a "drop flow" in case of an error. */
7136 if (ctx.error) {
7137 xout->slow = 0;
7138 if (xin->odp_actions) {
7139 ofpbuf_clear(xin->odp_actions);
7140 }
7141 }
7142 return ctx.error;
7143 }
7144
7145 enum ofperr
7146 xlate_resume(struct ofproto_dpif *ofproto,
7147 const struct ofputil_packet_in_private *pin,
7148 struct ofpbuf *odp_actions,
7149 enum slow_path_reason *slow)
7150 {
7151 struct dp_packet packet;
7152 dp_packet_use_const(&packet, pin->base.packet,
7153 pin->base.packet_len);
7154
7155 struct flow flow;
7156 flow_extract(&packet, &flow);
7157
7158 struct xlate_in xin;
7159 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7160 &flow, 0, NULL, ntohs(flow.tcp_flags),
7161 &packet, NULL, odp_actions);
7162
7163 struct ofpact_note noop;
7164 ofpact_init_NOTE(&noop);
7165 noop.length = 0;
7166
7167 bool any_actions = pin->actions_len > 0;
7168 struct frozen_state state = {
7169 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7170 .ofproto_uuid = pin->bridge,
7171 .stack = pin->stack,
7172 .stack_size = pin->stack_size,
7173 .mirrors = pin->mirrors,
7174 .conntracked = pin->conntracked,
7175
7176 /* When there are no actions, xlate_actions() will search the flow
7177 * table. We don't want it to do that (we want it to resume), so
7178 * supply a no-op action if there aren't any.
7179 *
7180 * (We can't necessarily avoid translating actions entirely if there
7181 * aren't any actions, because there might be some finishing-up to do
7182 * at the end of the pipeline, and we don't check for those
7183 * conditions.) */
7184 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7185 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7186
7187 .action_set = pin->action_set,
7188 .action_set_len = pin->action_set_len,
7189 };
7190 frozen_metadata_from_flow(&state.metadata,
7191 &pin->base.flow_metadata.flow);
7192 xin.frozen_state = &state;
7193
7194 struct xlate_out xout;
7195 enum xlate_error error = xlate_actions(&xin, &xout);
7196 *slow = xout.slow;
7197 xlate_out_uninit(&xout);
7198
7199 /* xlate_actions() can generate a number of errors, but only
7200 * XLATE_BRIDGE_NOT_FOUND really stands out to me as one that we should be
7201 * sure to report over OpenFlow. The others could come up in packet-outs
7202 * or regular flow translation and I don't think that it's going to be too
7203 * useful to report them to the controller. */
7204 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7205 }
7206
7207 /* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
7208 * supports a notion of an OAM flag, sets it if 'oam' is true.
7209 * May modify 'packet'.
7210 * Returns 0 if successful, otherwise a positive errno value. */
7211 int
7212 xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7213 struct dp_packet *packet)
7214 {
7215 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7216 struct xport *xport;
7217 uint64_t ofpacts_stub[1024 / 8];
7218 struct ofpbuf ofpacts;
7219 struct flow flow;
7220
7221 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
7222 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
7223 flow_extract(packet, &flow);
7224 flow.in_port.ofp_port = OFPP_NONE;
7225
7226 xport = xport_lookup(xcfg, ofport);
7227 if (!xport) {
7228 return EINVAL;
7229 }
7230
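    /* Request the OAM bit in the tunnel flags with a masked set-field
     * action. */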
7231 if (oam) {
7232 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7233 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7234 &flag, &flag);
7235 }
7236
7237 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
7238
7239 /* Actions here are not referring to anything versionable (flow tables or
7240 * groups) so we don't need to worry about the version here. */
7241 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7242 OVS_VERSION_MAX, &flow, NULL,
7243 ofpacts.data, ofpacts.size, packet);
7244 }
7245
7246 void
7247 xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7248 ofp_port_t in_port, struct eth_addr dl_src,
7249 int vlan, bool is_grat_arp)
7250 {
7251 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7252 struct xbridge *xbridge;
7253 struct xbundle *xbundle;
7254
7255 xbridge = xbridge_lookup(xcfg, ofproto);
7256 if (!xbridge) {
7257 return;
7258 }
7259
7260 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
7261 if (!xbundle) {
7262 return;
7263 }
7264
7265 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
7266 }
7267
7268 void
7269 xlate_set_support(const struct ofproto_dpif *ofproto,
7270 const struct dpif_backer_support *support)
7271 {
7272 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7273 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7274
7275 if (xbridge) {
7276 xbridge->support = *support;
7277 }
7278 }