ofproto/ofproto-dpif-xlate.c
1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
14
15 #include <config.h>
16
17 #include "ofproto/ofproto-dpif-xlate.h"
18
19 #include <errno.h>
20 #include <sys/types.h>
21 #include <netinet/in.h>
22 #include <arpa/inet.h>
23 #include <net/if.h>
24 #include <sys/socket.h>
25
26 #include "bfd.h"
27 #include "bitmap.h"
28 #include "bond.h"
29 #include "bundle.h"
30 #include "byte-order.h"
31 #include "cfm.h"
32 #include "connmgr.h"
33 #include "coverage.h"
34 #include "csum.h"
35 #include "dp-packet.h"
36 #include "dpif.h"
37 #include "in-band.h"
38 #include "lacp.h"
39 #include "learn.h"
40 #include "mac-learning.h"
41 #include "mcast-snooping.h"
42 #include "multipath.h"
43 #include "netdev-vport.h"
44 #include "netlink.h"
45 #include "nx-match.h"
46 #include "odp-execute.h"
47 #include "ofproto/ofproto-dpif-ipfix.h"
48 #include "ofproto/ofproto-dpif-mirror.h"
49 #include "ofproto/ofproto-dpif-monitor.h"
50 #include "ofproto/ofproto-dpif-sflow.h"
51 #include "ofproto/ofproto-dpif-trace.h"
52 #include "ofproto/ofproto-dpif-xlate-cache.h"
53 #include "ofproto/ofproto-dpif.h"
54 #include "ofproto/ofproto-provider.h"
55 #include "openvswitch/dynamic-string.h"
56 #include "openvswitch/meta-flow.h"
57 #include "openvswitch/list.h"
58 #include "openvswitch/ofp-actions.h"
59 #include "openvswitch/ofp-ed-props.h"
60 #include "openvswitch/vlog.h"
61 #include "ovs-lldp.h"
62 #include "ovs-router.h"
63 #include "packets.h"
64 #include "tnl-neigh-cache.h"
65 #include "tnl-ports.h"
66 #include "tunnel.h"
67 #include "util.h"
68 #include "uuid.h"
69
70 COVERAGE_DEFINE(xlate_actions);
71 COVERAGE_DEFINE(xlate_actions_oversize);
72 COVERAGE_DEFINE(xlate_actions_too_many_output);
73
74 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
75
76 /* Maximum depth of flow table recursion (due to resubmit actions) in a
77 * flow translation.
78 *
79 * The goal of limiting the depth of resubmits is to ensure that flow
80 * translation eventually terminates. Only resubmits to the same table or an
81 * earlier table count against the maximum depth. This is because resubmits to
82 * strictly monotonically increasing table IDs will eventually terminate, since
83 * any OpenFlow switch has a finite number of tables. OpenFlow tables are most
84 * commonly traversed in numerically increasing order, so this limit has little
85 * effect on conventionally designed OpenFlow pipelines.
86 *
87 * Outputs to patch ports and to groups also count against the depth limit. */
88 #define MAX_DEPTH 64
89
90 /* Maximum number of resubmit actions in a flow translation, whether they are
91 * recursive or not. */
92 #define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
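/* With MAX_DEPTH of 64 this works out to 64 * 64 = 4,096 resubmits allowed in
 * a single flow translation. */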
93
94 /* The structure holds an array of IP addresses assigned to a bridge and the
95 * number of elements in the array. These data are mutable and are evaluated
96 * when ARP or Neighbor Advertisement packets received on a native tunnel
97 * port are xlated. So 'ref_cnt' and RCU are used for synchronization. */
98 struct xbridge_addr {
99 struct in6_addr *addr; /* Array of IP addresses of xbridge. */
100 int n_addr; /* Number of IP addresses. */
101 struct ovs_refcount ref_cnt;
102 };
103
104 struct xbridge {
105 struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
106 struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
107
108 struct ovs_list xbundles; /* Owned xbundles. */
109 struct hmap xports; /* Indexed by ofp_port. */
110
111 char *name; /* Name used in log messages. */
112 struct dpif *dpif; /* Datapath interface. */
113 struct mac_learning *ml; /* Mac learning handle. */
114 struct mcast_snooping *ms; /* Multicast Snooping handle. */
115 struct mbridge *mbridge; /* Mirroring. */
116 struct dpif_sflow *sflow; /* SFlow handle, or null. */
117 struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
118 struct netflow *netflow; /* Netflow handle, or null. */
119 struct stp *stp; /* STP or null if disabled. */
120 struct rstp *rstp; /* RSTP or null if disabled. */
121
122 bool has_in_band; /* Bridge has in band control? */
123 bool forward_bpdu; /* Bridge forwards STP BPDUs? */
124
125 /* Datapath feature support. */
126 struct dpif_backer_support support;
127
128 struct xbridge_addr *addr;
129 };
130
131 struct xbundle {
132 struct hmap_node hmap_node; /* In global 'xbundles' map. */
133 struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
134
135 struct ovs_list list_node; /* In parent 'xbridges' list. */
136 struct xbridge *xbridge; /* Parent xbridge. */
137
138 struct ovs_list xports; /* Contains "struct xport"s. */
139
140 char *name; /* Name used in log messages. */
141 struct bond *bond; /* Nonnull iff more than one port. */
142 struct lacp *lacp; /* LACP handle or null. */
143
144 enum port_vlan_mode vlan_mode; /* VLAN mode. */
145 uint16_t qinq_ethtype; /* Ethertype of dot1q-tunnel interface
146 * either 0x8100 or 0x88a8. */
147 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
148 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
149 * NULL if all VLANs are trunked. */
150 unsigned long *cvlans; /* Bitmap of allowed customer vlans,
151 * NULL if all VLANs are allowed */
152 bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
153 bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
154 bool protected; /* Protected port mode */
155 };
156
157 struct xport {
158 struct hmap_node hmap_node; /* Node in global 'xports' map. */
159 struct ofport_dpif *ofport; /* Key in global 'xports' map. */
160
161 struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
162 ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
163
164 struct hmap_node uuid_node; /* Node in global 'xports_uuid' map. */
165 struct uuid uuid; /* Key in global 'xports_uuid' map. */
166
167 odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
168
169 struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
170 struct xbundle *xbundle; /* Parent xbundle or null. */
171
172 struct netdev *netdev; /* 'ofport''s netdev. */
173
174 struct xbridge *xbridge; /* Parent bridge. */
175 struct xport *peer; /* Patch port peer or null. */
176
177 enum ofputil_port_config config; /* OpenFlow port configuration. */
178 enum ofputil_port_state state; /* OpenFlow port state. */
179 int stp_port_no; /* STP port number or -1 if not in use. */
180 struct rstp_port *rstp_port; /* RSTP port or null. */
181
182 struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
183
184 bool may_enable; /* May be enabled in bonds. */
185 bool is_tunnel; /* Is a tunnel port. */
186 enum netdev_pt_mode pt_mode; /* packet_type handling. */
187
188 struct cfm *cfm; /* CFM handle or null. */
189 struct bfd *bfd; /* BFD handle or null. */
190 struct lldp *lldp; /* LLDP handle or null. */
191 };
192
193 struct xlate_ctx {
194 struct xlate_in *xin;
195 struct xlate_out *xout;
196
197 struct xlate_cfg *xcfg;
198 const struct xbridge *xbridge;
199
200 /* Flow at the last commit. */
201 struct flow base_flow;
202
203 /* Tunnel IP destination address as received. This is stored separately
204 * as the base_flow.tunnel is cleared on init to reflect the datapath
205 * behavior. Used to make sure not to send tunneled output to ourselves,
206 * which might lead to an infinite loop. This could happen easily
207 * if a tunnel is marked as 'remote_ip=flow', and the flow does not
208 * actually set the tun_dst field. */
209 struct in6_addr orig_tunnel_ipv6_dst;
210
211 /* Stack for the push and pop actions. See comment above nx_stack_push()
212 * in nx-match.c for info on how the stack is stored. */
213 struct ofpbuf stack;
214
215 /* The rule that we are currently translating, or NULL. */
216 struct rule_dpif *rule;
217
218 /* Flow translation populates this with wildcards relevant in translation.
219 * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
220 * null, this is a pointer to a temporary buffer. */
221 struct flow_wildcards *wc;
222
223 /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
224 * this is the same pointer. When 'xin->odp_actions' is null, this points
225 * to a scratch ofpbuf. This allows code to add actions to
226 * 'ctx->odp_actions' without worrying about whether the caller really
227 * wants actions. */
228 struct ofpbuf *odp_actions;
229
230 /* Statistics maintained by xlate_table_action().
231 *
232 * These statistics limit the amount of work that a single flow
233 * translation can perform. The goal of the first of these, 'depth', is
234 * primarily to prevent translation from performing an infinite amount of
235 * work. It counts the current depth of nested "resubmit"s (and a few
236 * other activities); when a resubmit returns, it decreases. Resubmits to
237 * tables in strictly monotonically increasing order don't contribute to
238 * 'depth' because they cannot cause a flow translation to take an infinite
239 * amount of time (because the number of tables is finite). Translation
240 * aborts when 'depth' exceeds MAX_DEPTH.
241 *
242 * 'resubmits', on the other hand, prevents flow translation from
243 * performing an extraordinarily large, though still finite, amount of work.
244 * It counts the total number of resubmits (and a few other activities)
245 * that have been executed. Returning from a resubmit does not affect this
246 * counter. Thus, this limits the amount of work that a particular
247 * translation can perform. Translation aborts when 'resubmits' exceeds
248 * MAX_RESUBMITS (which is much larger than MAX_DEPTH).
249 */
250 int depth; /* Current resubmit nesting depth. */
251 int resubmits; /* Total number of resubmits. */
252 bool in_group; /* Currently translating ofgroup, if true. */
253 bool in_action_set; /* Currently translating action_set, if true. */
254 bool in_packet_out; /* Currently translating a packet_out msg, if
255 * true. */
256 bool pending_encap; /* True when waiting to commit a pending
257 * encap action. */
258 bool pending_decap; /* True when waiting to commit a pending
259 * decap action. */
260 struct ofpbuf *encap_data; /* May contain a pointer to an ofpbuf with
261 * context for the datapath encap action.*/
262
263 uint8_t table_id; /* OpenFlow table ID where flow was found. */
264 ovs_be64 rule_cookie; /* Cookie of the rule being translated. */
265 uint32_t orig_skb_priority; /* Priority when packet arrived. */
266 uint32_t sflow_n_outputs; /* Number of output ports. */
267 odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
268 ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
269 bool exit; /* No further actions should be processed. */
270 mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
271 int mirror_snaplen; /* Max size of a mirror packet in bytes. */
272
273 /* Freezing Translation
274 * ====================
275 *
276 * At some point during translation, the code may recognize the need to halt
277 * and checkpoint the translation in a way that it can be restarted again
278 * later. We call the checkpointing process "freezing" and the restarting
279 * process "thawing".
280 *
281 * The use cases for freezing are:
282 *
283 * - "Recirculation", where the translation process discovers that it
284 * doesn't have enough information to complete translation without
285 * actually executing the actions that have already been translated,
286 * which provides the additionally needed information. In these
287 * situations, the translation process freezes itself and assigns the frozen
288 * data a unique "recirculation ID", which it associates with the data
289 * in a table in userspace (see ofproto-dpif-rid.h). It also adds a
290 * OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
291 * actions. When a packet hits that action, the datapath looks its
292 * flow up again using the ID. If there's a miss, it comes back to
293 * userspace, which finds the recirculation table entry for the ID,
294 * thaws the associated frozen data, and continues translation from
295 * that point given the additional information that is now known.
296 *
297 * The archetypal example is MPLS. As MPLS is implemented in
298 * OpenFlow, the protocol that follows the last MPLS label becomes
299 * known only when that label is popped by an OpenFlow action. That
300 * means that Open vSwitch can't extract the headers beyond the MPLS
301 * labels until the pop action is executed. Thus, at that point
302 * translation uses the recirculation process to extract the headers
303 * beyond the MPLS labels.
304 *
305 * (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
306 * output to bonds. OVS pre-populates all the datapath flows for bond
307 * output in the datapath, though, which means that the elaborate
308 * process of coming back to userspace for a second round of
309 * translation isn't needed, and so bonds don't follow the above
310 * process.)
311 *
312 * - "Continuation". A continuation is a way for an OpenFlow controller
313 * to interpose on a packet's traversal of the OpenFlow tables. When
314 * the translation process encounters a "controller" action with the
315 * "pause" flag, it freezes translation, serializes the frozen data,
316 * and sends it to an OpenFlow controller. The controller then
317 * examines and possibly modifies the frozen data and eventually sends
318 * it back to the switch, which thaws it and continues translation.
319 *
320 * The main problem of freezing translation is preserving state, so that
321 * when the translation is thawed later it resumes from where it left off,
322 * without disruption. In particular, actions must be preserved as follows:
323 *
324 * - If we're freezing because an action needed more information, the
325 * action that prompted it.
326 *
327 * - Any actions remaining to be translated within the current flow.
328 *
329 * - If translation was frozen within a NXAST_RESUBMIT, then any actions
330 * following the resubmit action. Resubmit actions can be nested, so
331 * this has to go all the way up the control stack.
332 *
333 * - The OpenFlow 1.1+ action set.
334 *
335 * State that actions and flow table lookups can depend on, such as the
336 * following, must also be preserved:
337 *
338 * - Metadata fields (input port, registers, OF1.1+ metadata, ...).
339 *
340 * - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
341 *
342 * - The table ID and cookie of the flow being translated at each level
343 * of the control stack, because these can become visible through
344 * OFPAT_CONTROLLER actions (and other ways).
345 *
346 * Translation allows for the control of this state preservation via these
347 * members. When a need to freeze translation is identified, the
348 * translation process:
349 *
350 * 1. Sets 'freezing' to true.
351 *
352 * 2. Sets 'exit' to true to tell later steps that we're exiting from the
353 * translation process.
354 *
355 * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
356 * frozen_actions.header to the action to make it easy to find it later.
357 * This action holds the current table ID and cookie so that they can be
358 * restored during a post-recirculation upcall translation.
359 *
360 * 4. Adds the action that prompted recirculation and any actions following
361 * it within the same flow to 'frozen_actions', so that they can be
362 * executed during a post-recirculation upcall translation.
363 *
364 * 5. Returns.
365 *
366 * 6. The action that prompted recirculation might be nested in a stack of
367 * nested "resubmit"s that have actions remaining. Each of these notices
368 * that we're exiting and freezing and responds by adding more
369 * OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
370 * followed by any actions that were yet unprocessed.
371 *
372 * If we're freezing because of recirculation, the caller generates a
373 * recirculation ID and associates all the state produced by this process
374 * with it. For post-recirculation upcall translation, the caller passes it
375 * back in for the new translation to execute. The process yielded a set of
376 * ofpacts that can be translated directly, so it is not much of a special
377 * case at that point.
378 */
379 bool freezing;
380 bool recirc_update_dp_hash; /* Generated recirculation will be preceded
381 * by datapath HASH action to get an updated
382 * dp_hash after recirculation. */
383 uint32_t dp_hash_alg;
384 uint32_t dp_hash_basis;
385 struct ofpbuf frozen_actions;
386 const struct ofpact_controller *pause;
387
388 /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
389 * This is a trigger for recirculation in cases where translating an action
390 * or looking up a flow requires access to the fields of the packet after
391 * the MPLS label stack that was originally present. */
392 bool was_mpls;
393
394 /* True if conntrack has been performed on this packet during processing
395 * on the current bridge. This is used to determine whether conntrack
396 * state from the datapath should be honored after thawing. */
397 bool conntracked;
398
399 /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
400 struct ofpact_nat *ct_nat_action;
401
402 /* OpenFlow 1.1+ action set.
403 *
404 * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
405 * When translation is otherwise complete, ofpacts_execute_action_set()
406 * converts it to a set of "struct ofpact"s that can be translated into
407 * datapath actions. */
408 bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
409 struct ofpbuf action_set; /* Action set. */
410
411 enum xlate_error error; /* Translation failed. */
412 };
413
414 /* Structure to track VLAN manipulation */
415 struct xvlan_single {
416 uint16_t tpid;
417 uint16_t vid;
418 uint16_t pcp;
419 };
420
421 struct xvlan {
422 struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
423 };
424
425 const char *xlate_strerror(enum xlate_error error)
426 {
427 switch (error) {
428 case XLATE_OK:
429 return "OK";
430 case XLATE_BRIDGE_NOT_FOUND:
431 return "Bridge not found";
432 case XLATE_RECURSION_TOO_DEEP:
433 return "Recursion too deep";
434 case XLATE_TOO_MANY_RESUBMITS:
435 return "Too many resubmits";
436 case XLATE_STACK_TOO_DEEP:
437 return "Stack too deep";
438 case XLATE_NO_RECIRCULATION_CONTEXT:
439 return "No recirculation context";
440 case XLATE_RECIRCULATION_CONFLICT:
441 return "Recirculation conflict";
442 case XLATE_TOO_MANY_MPLS_LABELS:
443 return "Too many MPLS labels";
444 case XLATE_INVALID_TUNNEL_METADATA:
445 return "Invalid tunnel metadata";
446 case XLATE_UNSUPPORTED_PACKET_TYPE:
447 return "Unsupported packet type";
448 }
449 return "Unknown error";
450 }
451
452 static void xlate_action_set(struct xlate_ctx *ctx);
453 static void xlate_commit_actions(struct xlate_ctx *ctx);
454
455 static void
456 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
457 struct xport *out_dev);
458
459 static void
460 ctx_trigger_freeze(struct xlate_ctx *ctx)
461 {
462 ctx->exit = true;
463 ctx->freezing = true;
464 }
465
466 static void
467 ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
468 uint32_t basis)
469 {
470 ctx->exit = true;
471 ctx->freezing = true;
472 ctx->recirc_update_dp_hash = true;
473 ctx->dp_hash_alg = type;
474 ctx->dp_hash_basis = basis;
475 }
476
477 static bool
478 ctx_first_frozen_action(const struct xlate_ctx *ctx)
479 {
480 return !ctx->frozen_actions.size;
481 }
482
483 static void
484 ctx_cancel_freeze(struct xlate_ctx *ctx)
485 {
486 if (ctx->freezing) {
487 ctx->freezing = false;
488 ctx->recirc_update_dp_hash = false;
489 ofpbuf_clear(&ctx->frozen_actions);
490 ctx->frozen_actions.header = NULL;
491 }
492 }
493
494 static void finish_freezing(struct xlate_ctx *ctx);
495
496 /* A controller may use OFPP_NONE as the ingress port to indicate that
497 * it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
498 * when an input bundle is needed for validation (e.g., mirroring or
499 * OFPP_NORMAL processing). It is not connected to an 'ofproto', nor does it have
500 * any 'port' structs, so care must be taken when dealing with it. */
501 static struct xbundle ofpp_none_bundle = {
502 .name = "OFPP_NONE",
503 .vlan_mode = PORT_VLAN_TRUNK
504 };
505
506 /* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
507 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
508 * traffic egressing the 'ofport' with that priority should be marked with. */
509 struct skb_priority_to_dscp {
510 struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
511 uint32_t skb_priority; /* Priority of this queue (see struct flow). */
512
513 uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
514 };
515
516 /* Xlate config contains hash maps of all bridges, bundles and ports.
517 * Xcfgp contains the pointer to the current xlate configuration.
518 * When the main thread needs to change the configuration, it copies xcfgp to
519 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
520 * does not block handler and revalidator threads. */
521 struct xlate_cfg {
522 struct hmap xbridges;
523 struct hmap xbundles;
524 struct hmap xports;
525 struct hmap xports_uuid;
526 };
527 static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
528 static struct xlate_cfg *new_xcfg = NULL;
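/* Reader-side usage, a minimal sketch: handler and revalidator threads obtain
 * the current configuration through RCU and perform lookups without taking a
 * lock, along the lines of:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * Pointers obtained this way remain valid until the thread quiesces. */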
529
530 typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
531 struct xlate_ctx *, bool);
532 static bool may_receive(const struct xport *, struct xlate_ctx *);
533 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
534 struct xlate_ctx *, bool);
535 static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
536 struct xlate_ctx *, bool);
537 static void xlate_normal(struct xlate_ctx *);
538 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
539 uint8_t table_id, bool may_packet_in,
540 bool honor_table_miss, bool with_ct_orig,
541 bool is_last_action, xlate_actions_handler *);
542
543 static bool input_vid_is_valid(const struct xlate_ctx *,
544 uint16_t vid, struct xbundle *);
545 static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
546 static void xvlan_pop(struct xvlan *src);
547 static void xvlan_push_uninit(struct xvlan *src);
548 static void xvlan_extract(const struct flow *, struct xvlan *);
549 static void xvlan_put(struct flow *, const struct xvlan *);
550 static void xvlan_input_translate(const struct xbundle *,
551 const struct xvlan *in,
552 struct xvlan *xvlan);
553 static void xvlan_output_translate(const struct xbundle *,
554 const struct xvlan *xvlan,
555 struct xvlan *out);
556 static void output_normal(struct xlate_ctx *, const struct xbundle *,
557 const struct xvlan *);
558
559 /* Optional bond recirculation parameter to compose_output_action(). */
560 struct xlate_bond_recirc {
561 uint32_t recirc_id; /* !0 Use recirculation instead of output. */
562 uint8_t hash_alg; /* !0 Compute hash for recirc before. */
563 uint32_t hash_basis; /* Compute hash for recirc before. */
564 };
565
566 static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
567 const struct xlate_bond_recirc *xr,
568 bool is_last_action, bool truncate);
569
570 static struct xbridge *xbridge_lookup(struct xlate_cfg *,
571 const struct ofproto_dpif *);
572 static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
573 const struct uuid *);
574 static struct xbundle *xbundle_lookup(struct xlate_cfg *,
575 const struct ofbundle *);
576 static struct xport *xport_lookup(struct xlate_cfg *,
577 const struct ofport_dpif *);
578 static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
579 const struct uuid *);
580 static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
581 static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
582 uint32_t skb_priority);
583 static void clear_skb_priorities(struct xport *);
584 static size_t count_skb_priorities(const struct xport *);
585 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
586 uint8_t *dscp);
587
588 static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
589 static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
590 static void xlate_xport_init(struct xlate_cfg *, struct xport *);
591 static void xlate_xbridge_set(struct xbridge *, struct dpif *,
592 const struct mac_learning *, struct stp *,
593 struct rstp *, const struct mcast_snooping *,
594 const struct mbridge *,
595 const struct dpif_sflow *,
596 const struct dpif_ipfix *,
597 const struct netflow *,
598 bool forward_bpdu, bool has_in_band,
599 const struct dpif_backer_support *,
600 const struct xbridge_addr *);
601 static void xlate_xbundle_set(struct xbundle *xbundle,
602 enum port_vlan_mode vlan_mode,
603 uint16_t qinq_ethtype, int vlan,
604 unsigned long *trunks, unsigned long *cvlans,
605 bool use_priority_tags,
606 const struct bond *bond, const struct lacp *lacp,
607 bool floodable, bool protected);
608 static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
609 const struct netdev *netdev, const struct cfm *cfm,
610 const struct bfd *bfd, const struct lldp *lldp,
611 int stp_port_no, const struct rstp_port *rstp_port,
612 enum ofputil_port_config config,
613 enum ofputil_port_state state, bool is_tunnel,
614 bool may_enable);
615 static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
616 static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
617 static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
618 static void xlate_xbridge_copy(struct xbridge *);
619 static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
620 static void xlate_xport_copy(struct xbridge *, struct xbundle *,
621 struct xport *);
622 static void xlate_xcfg_free(struct xlate_cfg *);
623 \f
624 /* Tracing helpers. */
625
626 /* If tracing is enabled in 'ctx', creates a new trace node and appends it to
627 * the list of nodes maintained in ctx->xin. The new node has type 'type' and
628 * its text is created from 'format' by treating it as a printf format string.
629 * Returns the list of nodes embedded within the new trace node; ordinarily,
630 * the caller can ignore this, but it is useful if the caller needs to nest
631 * more trace nodes within the new node.
632 *
633 * If tracing is not enabled, does nothing and returns NULL. */
634 static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
635 xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
636 const char *format, ...)
637 {
638 struct ovs_list *subtrace = NULL;
639 if (OVS_UNLIKELY(ctx->xin->trace)) {
640 va_list args;
641 va_start(args, format);
642 char *text = xvasprintf(format, args);
643 subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
644 va_end(args);
645 free(text);
646 }
647 return subtrace;
648 }
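/* A sketch of how a caller might nest further reports under a new trace node
 * (hypothetical usage, not taken verbatim from a caller in this file):
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     struct ovs_list *subs = xlate_report(ctx, OFT_DETAIL, "parent node");
 *     if (subs) {
 *         ctx->xin->trace = subs;
 *     }
 *     ...reports emitted here become children of "parent node"...
 *     ctx->xin->trace = old_trace;
 */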
649
650 /* This is like xlate_report() for errors that are serious enough that we
651 * should log them even if we are not tracing. */
652 static void OVS_PRINTF_FORMAT(2, 3)
653 xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
654 {
655 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
656 if (!OVS_UNLIKELY(ctx->xin->trace)
657 && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
658 return;
659 }
660
661 struct ds s = DS_EMPTY_INITIALIZER;
662 va_list args;
663 va_start(args, format);
664 ds_put_format_valist(&s, format, args);
665 va_end(args);
666
667 if (ctx->xin->trace) {
668 oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
669 } else {
670 ds_put_cstr(&s, " while processing ");
671 flow_format(&s, &ctx->base_flow, NULL);
672 ds_put_format(&s, " on bridge %s", ctx->xbridge->name);
673 VLOG_WARN("%s", ds_cstr(&s));
674 }
675 ds_destroy(&s);
676 }
677
678 /* This is like xlate_report() for messages that should be logged at debug
679 * level (even if we are not tracing) because they can be valuable for
680 * debugging. */
681 static void OVS_PRINTF_FORMAT(3, 4)
682 xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
683 const char *format, ...)
684 {
685 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
686 if (!OVS_UNLIKELY(ctx->xin->trace)
687 && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
688 return;
689 }
690
691 struct ds s = DS_EMPTY_INITIALIZER;
692 va_list args;
693 va_start(args, format);
694 ds_put_format_valist(&s, format, args);
695 va_end(args);
696
697 if (ctx->xin->trace) {
698 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
699 } else {
700 VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
701 }
702 ds_destroy(&s);
703 }
704
705 /* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
706 * trace, whose text is 'title' followed by a formatted version of the
707 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
708 *
709 * If tracing is not enabled, does nothing. */
710 static void
711 xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
712 const char *title,
713 const struct ofpact *ofpacts, size_t ofpacts_len)
714 {
715 if (OVS_UNLIKELY(ctx->xin->trace)) {
716 struct ds s = DS_EMPTY_INITIALIZER;
717 ds_put_format(&s, "%s: ", title);
718 struct ofpact_format_params fp = { .s = &s };
719 ofpacts_format(ofpacts, ofpacts_len, &fp);
720 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
721 ds_destroy(&s);
722 }
723 }
724
725 /* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
726 * trace, whose text is a formatted version of the OpenFlow action set.
727 * 'verb' should be "was" or "is", depending on whether the action set reported
728 * is the new action set or the old one.
729 *
730 * If tracing is not enabled, does nothing. */
731 static void
732 xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
733 {
734 if (OVS_UNLIKELY(ctx->xin->trace)) {
735 struct ofpbuf action_list;
736 ofpbuf_init(&action_list, 0);
737 ofpacts_execute_action_set(&action_list, &ctx->action_set);
738 if (action_list.size) {
739 struct ds s = DS_EMPTY_INITIALIZER;
740 struct ofpact_format_params fp = { .s = &s };
741 ofpacts_format(action_list.data, action_list.size, &fp);
742 xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
743 verb, ds_cstr(&s));
744 ds_destroy(&s);
745 } else {
746 xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
747 }
748 ofpbuf_uninit(&action_list);
749 }
750 }
751
752
753 /* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
754 * OpenFlow table 'table_id') to the trace and makes this node the parent for
755 * future trace nodes. The caller should save ctx->xin->trace before calling
756 * this function, then after tracing all of the activities under the table,
757 * restore its previous value.
758 *
759 * If tracing is not enabled, does nothing. */
760 static void
761 xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
762 uint8_t table_id)
763 {
764 if (OVS_LIKELY(!ctx->xin->trace)) {
765 return;
766 }
767
768 struct ds s = DS_EMPTY_INITIALIZER;
769 ds_put_format(&s, "%2d. ", table_id);
770 if (rule == ctx->xin->ofproto->miss_rule) {
771 ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
772 } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
773 ds_put_cstr(&s, "No match.");
774 } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
775 ds_put_cstr(&s, "Packets are IP fragments and "
776 "the fragment handling mode is \"drop\".");
777 } else {
778 minimatch_format(&rule->up.cr.match,
779 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
780 NULL, &s, OFP_DEFAULT_PRIORITY);
781 if (ds_last(&s) != ' ') {
782 ds_put_cstr(&s, ", ");
783 }
784 ds_put_format(&s, "priority %d", rule->up.cr.priority);
785 if (rule->up.flow_cookie) {
786 ds_put_format(&s, ", cookie %#"PRIx64,
787 ntohll(rule->up.flow_cookie));
788 }
789 }
790 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
791 ds_cstr(&s))->subs;
792 ds_destroy(&s);
793 }
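/* A sketch of the save/restore protocol described above (hypothetical caller):
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     xlate_report_table(ctx, rule, table_id);
 *     ...translate the activities under the table, adding nested trace nodes...
 *     ctx->xin->trace = old_trace;
 */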
794
795 /* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
796 * reporting the value of subfield 'sf'.
797 *
798 * If tracing is not enabled, does nothing. */
799 static void
800 xlate_report_subfield(const struct xlate_ctx *ctx,
801 const struct mf_subfield *sf)
802 {
803 if (OVS_UNLIKELY(ctx->xin->trace)) {
804 struct ds s = DS_EMPTY_INITIALIZER;
805 mf_format_subfield(sf, &s);
806 ds_put_cstr(&s, " is now ");
807
808 if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
809 union mf_value value;
810 mf_get_value(sf->field, &ctx->xin->flow, &value);
811 mf_format(sf->field, &value, NULL, NULL, &s);
812 } else {
813 union mf_subvalue cst;
814 mf_read_subfield(sf, &ctx->xin->flow, &cst);
815 ds_put_hex(&s, &cst, sizeof cst);
816 }
817
818 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
819
820 ds_destroy(&s);
821 }
822 }
823 \f
824 static void
825 xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
826 {
827 ovs_list_init(&xbridge->xbundles);
828 hmap_init(&xbridge->xports);
829 hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
830 hash_pointer(xbridge->ofproto, 0));
831 }
832
833 static void
834 xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
835 {
836 ovs_list_init(&xbundle->xports);
837 ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
838 hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
839 hash_pointer(xbundle->ofbundle, 0));
840 }
841
842 static void
843 xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
844 {
845 hmap_init(&xport->skb_priorities);
846 hmap_insert(&xcfg->xports, &xport->hmap_node,
847 hash_pointer(xport->ofport, 0));
848 hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
849 hash_ofp_port(xport->ofp_port));
850 hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
851 uuid_hash(&xport->uuid));
852 }
853
854 static struct xbridge_addr *
855 xbridge_addr_create(struct xbridge *xbridge)
856 {
857 struct xbridge_addr *xbridge_addr = xbridge->addr;
858 struct in6_addr *addr = NULL, *mask = NULL;
859 struct netdev *dev;
860 int err, n_addr = 0;
861
862 err = netdev_open(xbridge->name, NULL, &dev);
863 if (!err) {
864 err = netdev_get_addr_list(dev, &addr, &mask, &n_addr);
865 if (!err) {
866 if (!xbridge->addr ||
867 n_addr != xbridge->addr->n_addr ||
868 (xbridge->addr->addr && memcmp(addr, xbridge->addr->addr,
869 sizeof(*addr) * n_addr))) {
870 xbridge_addr = xzalloc(sizeof *xbridge_addr);
871 xbridge_addr->addr = addr;
872 xbridge_addr->n_addr = n_addr;
873 ovs_refcount_init(&xbridge_addr->ref_cnt);
874 } else {
875 free(addr);
876 }
877 free(mask);
878 }
879 netdev_close(dev);
880 }
881
882 return xbridge_addr;
883 }
884
885 static struct xbridge_addr *
886 xbridge_addr_ref(const struct xbridge_addr *addr_)
887 {
888 struct xbridge_addr *addr = CONST_CAST(struct xbridge_addr *, addr_);
889 if (addr) {
890 ovs_refcount_ref(&addr->ref_cnt);
891 }
892 return addr;
893 }
894
895 static void
896 xbridge_addr_unref(struct xbridge_addr *addr)
897 {
898 if (addr && ovs_refcount_unref_relaxed(&addr->ref_cnt) == 1) {
899 free(addr->addr);
900 free(addr);
901 }
902 }
903
904 static void
905 xlate_xbridge_set(struct xbridge *xbridge,
906 struct dpif *dpif,
907 const struct mac_learning *ml, struct stp *stp,
908 struct rstp *rstp, const struct mcast_snooping *ms,
909 const struct mbridge *mbridge,
910 const struct dpif_sflow *sflow,
911 const struct dpif_ipfix *ipfix,
912 const struct netflow *netflow,
913 bool forward_bpdu, bool has_in_band,
914 const struct dpif_backer_support *support,
915 const struct xbridge_addr *addr)
916 {
917 if (xbridge->ml != ml) {
918 mac_learning_unref(xbridge->ml);
919 xbridge->ml = mac_learning_ref(ml);
920 }
921
922 if (xbridge->ms != ms) {
923 mcast_snooping_unref(xbridge->ms);
924 xbridge->ms = mcast_snooping_ref(ms);
925 }
926
927 if (xbridge->mbridge != mbridge) {
928 mbridge_unref(xbridge->mbridge);
929 xbridge->mbridge = mbridge_ref(mbridge);
930 }
931
932 if (xbridge->sflow != sflow) {
933 dpif_sflow_unref(xbridge->sflow);
934 xbridge->sflow = dpif_sflow_ref(sflow);
935 }
936
937 if (xbridge->ipfix != ipfix) {
938 dpif_ipfix_unref(xbridge->ipfix);
939 xbridge->ipfix = dpif_ipfix_ref(ipfix);
940 }
941
942 if (xbridge->stp != stp) {
943 stp_unref(xbridge->stp);
944 xbridge->stp = stp_ref(stp);
945 }
946
947 if (xbridge->rstp != rstp) {
948 rstp_unref(xbridge->rstp);
949 xbridge->rstp = rstp_ref(rstp);
950 }
951
952 if (xbridge->netflow != netflow) {
953 netflow_unref(xbridge->netflow);
954 xbridge->netflow = netflow_ref(netflow);
955 }
956
957 if (xbridge->addr != addr) {
958 xbridge_addr_unref(xbridge->addr);
959 xbridge->addr = xbridge_addr_ref(addr);
960 }
961
962 xbridge->dpif = dpif;
963 xbridge->forward_bpdu = forward_bpdu;
964 xbridge->has_in_band = has_in_band;
965 xbridge->support = *support;
966 }
967
968 static void
969 xlate_xbundle_set(struct xbundle *xbundle,
970 enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
971 int vlan, unsigned long *trunks, unsigned long *cvlans,
972 bool use_priority_tags,
973 const struct bond *bond, const struct lacp *lacp,
974 bool floodable, bool protected)
975 {
976 ovs_assert(xbundle->xbridge);
977
978 xbundle->vlan_mode = vlan_mode;
979 xbundle->qinq_ethtype = qinq_ethtype;
980 xbundle->vlan = vlan;
981 xbundle->trunks = trunks;
982 xbundle->cvlans = cvlans;
983 xbundle->use_priority_tags = use_priority_tags;
984 xbundle->floodable = floodable;
985 xbundle->protected = protected;
986
987 if (xbundle->bond != bond) {
988 bond_unref(xbundle->bond);
989 xbundle->bond = bond_ref(bond);
990 }
991
992 if (xbundle->lacp != lacp) {
993 lacp_unref(xbundle->lacp);
994 xbundle->lacp = lacp_ref(lacp);
995 }
996 }
997
998 static void
999 xlate_xport_set(struct xport *xport, odp_port_t odp_port,
1000 const struct netdev *netdev, const struct cfm *cfm,
1001 const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
1002 const struct rstp_port* rstp_port,
1003 enum ofputil_port_config config, enum ofputil_port_state state,
1004 bool is_tunnel, bool may_enable)
1005 {
1006 xport->config = config;
1007 xport->state = state;
1008 xport->stp_port_no = stp_port_no;
1009 xport->is_tunnel = is_tunnel;
1010 xport->pt_mode = netdev_get_pt_mode(netdev);
1011 xport->may_enable = may_enable;
1012 xport->odp_port = odp_port;
1013
1014 if (xport->rstp_port != rstp_port) {
1015 rstp_port_unref(xport->rstp_port);
1016 xport->rstp_port = rstp_port_ref(rstp_port);
1017 }
1018
1019 if (xport->cfm != cfm) {
1020 cfm_unref(xport->cfm);
1021 xport->cfm = cfm_ref(cfm);
1022 }
1023
1024 if (xport->bfd != bfd) {
1025 bfd_unref(xport->bfd);
1026 xport->bfd = bfd_ref(bfd);
1027 }
1028
1029 if (xport->lldp != lldp) {
1030 lldp_unref(xport->lldp);
1031 xport->lldp = lldp_ref(lldp);
1032 }
1033
1034 if (xport->netdev != netdev) {
1035 netdev_close(xport->netdev);
1036 xport->netdev = netdev_ref(netdev);
1037 }
1038 }
1039
1040 static void
1041 xlate_xbridge_copy(struct xbridge *xbridge)
1042 {
1043 struct xbundle *xbundle;
1044 struct xport *xport;
1045 struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
1046 new_xbridge->ofproto = xbridge->ofproto;
1047 new_xbridge->name = xstrdup(xbridge->name);
1048 xlate_xbridge_init(new_xcfg, new_xbridge);
1049
1050 xlate_xbridge_set(new_xbridge,
1051 xbridge->dpif, xbridge->ml, xbridge->stp,
1052 xbridge->rstp, xbridge->ms, xbridge->mbridge,
1053 xbridge->sflow, xbridge->ipfix, xbridge->netflow,
1054 xbridge->forward_bpdu, xbridge->has_in_band,
1055 &xbridge->support, xbridge->addr);
1056 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
1057 xlate_xbundle_copy(new_xbridge, xbundle);
1058 }
1059
1060 /* Copy xports which are not part of an xbundle. */
1061 HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
1062 if (!xport->xbundle) {
1063 xlate_xport_copy(new_xbridge, NULL, xport);
1064 }
1065 }
1066 }
1067
1068 static void
1069 xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
1070 {
1071 struct xport *xport;
1072 struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
1073 new_xbundle->ofbundle = xbundle->ofbundle;
1074 new_xbundle->xbridge = xbridge;
1075 new_xbundle->name = xstrdup(xbundle->name);
1076 xlate_xbundle_init(new_xcfg, new_xbundle);
1077
1078 xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
1079 xbundle->vlan, xbundle->trunks, xbundle->cvlans,
1080 xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
1081 xbundle->floodable, xbundle->protected);
1082 LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
1083 xlate_xport_copy(xbridge, new_xbundle, xport);
1084 }
1085 }
1086
1087 static void
1088 xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
1089 struct xport *xport)
1090 {
1091 struct skb_priority_to_dscp *pdscp, *new_pdscp;
1092 struct xport *new_xport = xzalloc(sizeof *xport);
1093 new_xport->ofport = xport->ofport;
1094 new_xport->ofp_port = xport->ofp_port;
1095 new_xport->xbridge = xbridge;
1096 new_xport->uuid = xport->uuid;
1097 xlate_xport_init(new_xcfg, new_xport);
1098
1099 xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
1100 xport->bfd, xport->lldp, xport->stp_port_no,
1101 xport->rstp_port, xport->config, xport->state,
1102 xport->is_tunnel, xport->may_enable);
1103
1104 if (xport->peer) {
1105 struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
1106 if (peer) {
1107 new_xport->peer = peer;
1108 new_xport->peer->peer = new_xport;
1109 }
1110 }
1111
1112 if (xbundle) {
1113 new_xport->xbundle = xbundle;
1114 ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
1115 }
1116
1117 HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
1118 new_pdscp = xmalloc(sizeof *pdscp);
1119 new_pdscp->skb_priority = pdscp->skb_priority;
1120 new_pdscp->dscp = pdscp->dscp;
1121 hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
1122 hash_int(new_pdscp->skb_priority, 0));
1123 }
1124 }
1125
1126 /* Sets the current xlate configuration to new_xcfg and frees the old xlate
1127 * configuration in xcfgp.
1128 *
1129 * This needs to be called after editing the xlate configuration.
1130 *
1131 * Functions that edit the new xlate configuration are
1132 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
1133 *
1134 * A sample workflow:
1135 *
1136 * xlate_txn_start();
1137 * ...
1138 * edit_xlate_configuration();
1139 * ...
1140 * xlate_txn_commit(); */
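/* A more concrete sketch of the same workflow, using one of the editing
 * functions above (the arguments are placeholders for whatever the caller
 * already has at hand):
 *
 *     xlate_txn_start();
 *     xlate_ofproto_set(ofproto, name, dpif, ml, stp, rstp, ms, mbridge,
 *                       sflow, ipfix, netflow, forward_bpdu, has_in_band,
 *                       support);
 *     xlate_txn_commit();
 */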
1141 void
1142 xlate_txn_commit(void)
1143 {
1144 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1145
1146 ovsrcu_set(&xcfgp, new_xcfg);
1147 ovsrcu_synchronize();
1148 xlate_xcfg_free(xcfg);
1149 new_xcfg = NULL;
1150 }
1151
1152 /* Copies the current xlate configuration in xcfgp to new_xcfg.
1153 *
1154 * This needs to be called prior to editing the xlate configuration. */
1155 void
1156 xlate_txn_start(void)
1157 {
1158 struct xbridge *xbridge;
1159 struct xlate_cfg *xcfg;
1160
1161 ovs_assert(!new_xcfg);
1162
1163 new_xcfg = xmalloc(sizeof *new_xcfg);
1164 hmap_init(&new_xcfg->xbridges);
1165 hmap_init(&new_xcfg->xbundles);
1166 hmap_init(&new_xcfg->xports);
1167 hmap_init(&new_xcfg->xports_uuid);
1168
1169 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1170 if (!xcfg) {
1171 return;
1172 }
1173
1174 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1175 xlate_xbridge_copy(xbridge);
1176 }
1177 }
1178
1179
1180 static void
1181 xlate_xcfg_free(struct xlate_cfg *xcfg)
1182 {
1183 struct xbridge *xbridge, *next_xbridge;
1184
1185 if (!xcfg) {
1186 return;
1187 }
1188
1189 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1190 xlate_xbridge_remove(xcfg, xbridge);
1191 }
1192
1193 hmap_destroy(&xcfg->xbridges);
1194 hmap_destroy(&xcfg->xbundles);
1195 hmap_destroy(&xcfg->xports);
1196 hmap_destroy(&xcfg->xports_uuid);
1197 free(xcfg);
1198 }
1199
1200 void
1201 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
1202 struct dpif *dpif,
1203 const struct mac_learning *ml, struct stp *stp,
1204 struct rstp *rstp, const struct mcast_snooping *ms,
1205 const struct mbridge *mbridge,
1206 const struct dpif_sflow *sflow,
1207 const struct dpif_ipfix *ipfix,
1208 const struct netflow *netflow,
1209 bool forward_bpdu, bool has_in_band,
1210 const struct dpif_backer_support *support)
1211 {
1212 struct xbridge *xbridge;
1213 struct xbridge_addr *xbridge_addr, *old_addr;
1214
1215 ovs_assert(new_xcfg);
1216
1217 xbridge = xbridge_lookup(new_xcfg, ofproto);
1218 if (!xbridge) {
1219 xbridge = xzalloc(sizeof *xbridge);
1220 xbridge->ofproto = ofproto;
1221
1222 xlate_xbridge_init(new_xcfg, xbridge);
1223 }
1224
1225 free(xbridge->name);
1226 xbridge->name = xstrdup(name);
1227
1228 xbridge_addr = xbridge_addr_create(xbridge);
1229 old_addr = xbridge->addr;
1230
1231 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
1232 netflow, forward_bpdu, has_in_band, support,
1233 xbridge_addr);
1234
1235 if (xbridge_addr != old_addr) {
1236 xbridge_addr_unref(xbridge_addr);
1237 }
1238 }
1239
1240 static void
1241 xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
1242 {
1243 struct xbundle *xbundle, *next_xbundle;
1244 struct xport *xport, *next_xport;
1245
1246 if (!xbridge) {
1247 return;
1248 }
1249
1250 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
1251 xlate_xport_remove(xcfg, xport);
1252 }
1253
1254 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
1255 xlate_xbundle_remove(xcfg, xbundle);
1256 }
1257
1258 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
1259 mac_learning_unref(xbridge->ml);
1260 mcast_snooping_unref(xbridge->ms);
1261 mbridge_unref(xbridge->mbridge);
1262 dpif_sflow_unref(xbridge->sflow);
1263 dpif_ipfix_unref(xbridge->ipfix);
1264 netflow_unref(xbridge->netflow);
1265 stp_unref(xbridge->stp);
1266 rstp_unref(xbridge->rstp);
1267 xbridge_addr_unref(xbridge->addr);
1268 hmap_destroy(&xbridge->xports);
1269 free(xbridge->name);
1270 free(xbridge);
1271 }
1272
1273 void
1274 xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1275 {
1276 struct xbridge *xbridge;
1277
1278 ovs_assert(new_xcfg);
1279
1280 xbridge = xbridge_lookup(new_xcfg, ofproto);
1281 xlate_xbridge_remove(new_xcfg, xbridge);
1282 }
1283
1284 void
1285 xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1286 const char *name, enum port_vlan_mode vlan_mode,
1287 uint16_t qinq_ethtype, int vlan,
1288 unsigned long *trunks, unsigned long *cvlans,
1289 bool use_priority_tags,
1290 const struct bond *bond, const struct lacp *lacp,
1291 bool floodable, bool protected)
1292 {
1293 struct xbundle *xbundle;
1294
1295 ovs_assert(new_xcfg);
1296
1297 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1298 if (!xbundle) {
1299 xbundle = xzalloc(sizeof *xbundle);
1300 xbundle->ofbundle = ofbundle;
1301 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
1302
1303 xlate_xbundle_init(new_xcfg, xbundle);
1304 }
1305
1306 free(xbundle->name);
1307 xbundle->name = xstrdup(name);
1308
1309 xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
1310 use_priority_tags, bond, lacp, floodable, protected);
1311 }
1312
1313 static void
1314 xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
1315 {
1316 struct xport *xport;
1317
1318 if (!xbundle) {
1319 return;
1320 }
1321
1322 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
1323 xport->xbundle = NULL;
1324 }
1325
1326 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
1327 ovs_list_remove(&xbundle->list_node);
1328 bond_unref(xbundle->bond);
1329 lacp_unref(xbundle->lacp);
1330 free(xbundle->name);
1331 free(xbundle);
1332 }
1333
1334 void
1335 xlate_bundle_remove(struct ofbundle *ofbundle)
1336 {
1337 struct xbundle *xbundle;
1338
1339 ovs_assert(new_xcfg);
1340
1341 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1342 xlate_xbundle_remove(new_xcfg, xbundle);
1343 }
1344
1345 void
1346 xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1347 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1348 odp_port_t odp_port, const struct netdev *netdev,
1349 const struct cfm *cfm, const struct bfd *bfd,
1350 const struct lldp *lldp, struct ofport_dpif *peer,
1351 int stp_port_no, const struct rstp_port *rstp_port,
1352 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
1353 enum ofputil_port_config config,
1354 enum ofputil_port_state state, bool is_tunnel,
1355 bool may_enable)
1356 {
1357 size_t i;
1358 struct xport *xport;
1359
1360 ovs_assert(new_xcfg);
1361
1362 xport = xport_lookup(new_xcfg, ofport);
1363 if (!xport) {
1364 xport = xzalloc(sizeof *xport);
1365 xport->ofport = ofport;
1366 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
1367 xport->ofp_port = ofp_port;
1368 uuid_generate(&xport->uuid);
1369
1370 xlate_xport_init(new_xcfg, xport);
1371 }
1372
1373 ovs_assert(xport->ofp_port == ofp_port);
1374
1375 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1376 stp_port_no, rstp_port, config, state, is_tunnel,
1377 may_enable);
1378
1379 if (xport->peer) {
1380 xport->peer->peer = NULL;
1381 }
1382 xport->peer = xport_lookup(new_xcfg, peer);
1383 if (xport->peer) {
1384 xport->peer->peer = xport;
1385 }
1386
1387 if (xport->xbundle) {
1388 ovs_list_remove(&xport->bundle_node);
1389 }
1390 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
1391 if (xport->xbundle) {
1392 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
1393 }
1394
1395 clear_skb_priorities(xport);
1396 for (i = 0; i < n_qdscp; i++) {
1397 struct skb_priority_to_dscp *pdscp;
1398 uint32_t skb_priority;
1399
1400 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1401 &skb_priority)) {
1402 continue;
1403 }
1404
1405 pdscp = xmalloc(sizeof *pdscp);
1406 pdscp->skb_priority = skb_priority;
1407 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1408 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1409 hash_int(pdscp->skb_priority, 0));
1410 }
1411 }
1412
1413 static void
1414 xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
1415 {
1416 if (!xport) {
1417 return;
1418 }
1419
1420 if (xport->peer) {
1421 xport->peer->peer = NULL;
1422 xport->peer = NULL;
1423 }
1424
1425 if (xport->xbundle) {
1426 ovs_list_remove(&xport->bundle_node);
1427 }
1428
1429 clear_skb_priorities(xport);
1430 hmap_destroy(&xport->skb_priorities);
1431
1432 hmap_remove(&xcfg->xports, &xport->hmap_node);
1433 hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
1434 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1435
1436 netdev_close(xport->netdev);
1437 rstp_port_unref(xport->rstp_port);
1438 cfm_unref(xport->cfm);
1439 bfd_unref(xport->bfd);
1440 lldp_unref(xport->lldp);
1441 free(xport);
1442 }
1443
1444 void
1445 xlate_ofport_remove(struct ofport_dpif *ofport)
1446 {
1447 struct xport *xport;
1448
1449 ovs_assert(new_xcfg);
1450
1451 xport = xport_lookup(new_xcfg, ofport);
1452 xlate_xport_remove(new_xcfg, xport);
1453 }
1454
1455 static struct ofproto_dpif *
1456 xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
1457 ofp_port_t *ofp_in_port, const struct xport **xportp)
1458 {
1459 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1460 const struct xport *xport;
1461
1462 /* If packet is recirculated, xport can be retrieved from frozen state. */
1463 if (flow->recirc_id) {
1464 const struct recirc_id_node *recirc_id_node;
1465
1466 recirc_id_node = recirc_id_node_find(flow->recirc_id);
1467
1468 if (OVS_UNLIKELY(!recirc_id_node)) {
1469 return NULL;
1470 }
1471
1472 /* If recirculation was initiated due to bond (in_port = OFPP_NONE)
1473 * then frozen state is static and xport_uuid is not defined, so xport
1474 * cannot be restored from frozen state. */
1475 if (recirc_id_node->state.metadata.in_port != OFPP_NONE) {
1476 struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
1477 xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
1478 if (xport && xport->xbridge && xport->xbridge->ofproto) {
1479 goto out;
1480 }
1481 }
1482 }
1483
1484 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1485 ? tnl_port_receive(flow)
1486 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1487 if (OVS_UNLIKELY(!xport)) {
1488 return NULL;
1489 }
1490
1491 out:
1492 *xportp = xport;
1493 if (ofp_in_port) {
1494 *ofp_in_port = xport->ofp_port;
1495 }
1496 return xport->xbridge->ofproto;
1497 }
1498
1499 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1500 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1501 struct ofproto_dpif *
1502 xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1503 ofp_port_t *ofp_in_port)
1504 {
1505 const struct xport *xport;
1506
1507 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1508 }
1509
1510 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1511 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
1512 * OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
1513 * handles for those protocols if they're enabled. The caller may use the returned
1514 * pointers until quiescing; for longer-term use, additional references must
1515 * be taken.
1516 *
1517 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
1518 */
1519 int
1520 xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1521 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1522 struct dpif_sflow **sflow, struct netflow **netflow,
1523 ofp_port_t *ofp_in_port)
1524 {
1525 struct ofproto_dpif *ofproto;
1526 const struct xport *xport;
1527
1528 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
1529
1530 if (!ofproto) {
1531 return ENODEV;
1532 }
1533
1534 if (ofprotop) {
1535 *ofprotop = ofproto;
1536 }
1537
1538 if (ipfix) {
1539 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1540 }
1541
1542 if (sflow) {
1543 *sflow = xport ? xport->xbridge->sflow : NULL;
1544 }
1545
1546 if (netflow) {
1547 *netflow = xport ? xport->xbridge->netflow : NULL;
1548 }
1549
1550 return 0;
1551 }
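/* A sketch of typical use (hypothetical caller; the optional output arguments
 * may be NULL when the caller does not need them):
 *
 *     struct ofproto_dpif *ofproto;
 *     ofp_port_t ofp_in_port;
 *
 *     if (!xlate_lookup(backer, flow, &ofproto, NULL, NULL, NULL,
 *                       &ofp_in_port)) {
 *         ...'ofproto' and 'ofp_in_port' identify where 'flow' arrived...
 *     }
 */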
1552
1553 static struct xbridge *
1554 xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
1555 {
1556 struct hmap *xbridges;
1557 struct xbridge *xbridge;
1558
1559 if (!ofproto || !xcfg) {
1560 return NULL;
1561 }
1562
1563 xbridges = &xcfg->xbridges;
1564
1565 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
1566 xbridges) {
1567 if (xbridge->ofproto == ofproto) {
1568 return xbridge;
1569 }
1570 }
1571 return NULL;
1572 }
1573
1574 static struct xbridge *
1575 xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1576 {
1577 struct xbridge *xbridge;
1578
1579 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1580 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
1581 return xbridge;
1582 }
1583 }
1584 return NULL;
1585 }
1586
1587 static struct xbundle *
1588 xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
1589 {
1590 struct hmap *xbundles;
1591 struct xbundle *xbundle;
1592
1593 if (!ofbundle || !xcfg) {
1594 return NULL;
1595 }
1596
1597 xbundles = &xcfg->xbundles;
1598
1599 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
1600 xbundles) {
1601 if (xbundle->ofbundle == ofbundle) {
1602 return xbundle;
1603 }
1604 }
1605 return NULL;
1606 }
1607
1608 static struct xport *
1609 xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
1610 {
1611 struct hmap *xports;
1612 struct xport *xport;
1613
1614 if (!ofport || !xcfg) {
1615 return NULL;
1616 }
1617
1618 xports = &xcfg->xports;
1619
1620 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
1621 xports) {
1622 if (xport->ofport == ofport) {
1623 return xport;
1624 }
1625 }
1626 return NULL;
1627 }
1628
1629 static struct xport *
1630 xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1631 {
1632 struct hmap *xports;
1633 struct xport *xport;
1634
1635 if (uuid_is_zero(uuid) || !xcfg) {
1636 return NULL;
1637 }
1638
1639 xports = &xcfg->xports_uuid;
1640
1641 HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
1642 if (uuid_equals(&xport->uuid, uuid)) {
1643 return xport;
1644 }
1645 }
1646 return NULL;
1647 }
1648
1649 static struct stp_port *
1650 xport_get_stp_port(const struct xport *xport)
1651 {
1652 return xport->xbridge->stp && xport->stp_port_no != -1
1653 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1654 : NULL;
1655 }
1656
1657 static bool
1658 xport_stp_learn_state(const struct xport *xport)
1659 {
1660 struct stp_port *sp = xport_get_stp_port(xport);
1661 return sp
1662 ? stp_learn_in_state(stp_port_get_state(sp))
1663 : true;
1664 }
1665
1666 static bool
1667 xport_stp_forward_state(const struct xport *xport)
1668 {
1669 struct stp_port *sp = xport_get_stp_port(xport);
1670 return sp
1671 ? stp_forward_in_state(stp_port_get_state(sp))
1672 : true;
1673 }
1674
1675 static bool
1676 xport_stp_should_forward_bpdu(const struct xport *xport)
1677 {
1678 struct stp_port *sp = xport_get_stp_port(xport);
1679 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
1680 }
1681
1682 /* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1683 * were used to make the determination. */
1684 static bool
1685 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1686 {
1687 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
1688 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1689 return is_stp(flow);
1690 }
1691
1692 static void
1693 stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1694 {
1695 struct stp_port *sp = xport_get_stp_port(xport);
1696 struct dp_packet payload = *packet;
1697 struct eth_header *eth = dp_packet_data(&payload);
1698
1699 /* Sink packets on ports that have STP disabled when the bridge has
1700 * STP enabled. */
1701 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1702 return;
1703 }
1704
1705 /* Trim off padding on payload. */
1706 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1707 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1708 }
1709
1710 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1711 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
1712 }
1713 }
1714
1715 static enum rstp_state
1716 xport_get_rstp_port_state(const struct xport *xport)
1717 {
1718 return xport->rstp_port
1719 ? rstp_port_get_state(xport->rstp_port)
1720 : RSTP_DISABLED;
1721 }
1722
1723 static bool
1724 xport_rstp_learn_state(const struct xport *xport)
1725 {
1726 return xport->xbridge->rstp && xport->rstp_port
1727 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1728 : true;
1729 }
1730
1731 static bool
1732 xport_rstp_forward_state(const struct xport *xport)
1733 {
1734 return xport->xbridge->rstp && xport->rstp_port
1735 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1736 : true;
1737 }
1738
1739 static bool
1740 xport_rstp_should_manage_bpdu(const struct xport *xport)
1741 {
1742 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
1743 }
1744
1745 static void
1746 rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1747 {
1748 struct dp_packet payload = *packet;
1749 struct eth_header *eth = dp_packet_data(&payload);
1750
1751 /* Sink packets on ports that have no RSTP. */
1752 if (!xport->rstp_port) {
1753 return;
1754 }
1755
1756 /* Trim off padding on payload. */
1757 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1758 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1759 }
1760
1761 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1762 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1763 dp_packet_size(&payload));
1764 }
1765 }
1766
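/* Returns the xport on 'xbridge' with OpenFlow port number 'ofp_port', or
 * NULL if 'xbridge' has no such port. */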
1767 static struct xport *
1768 get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1769 {
1770 struct xport *xport;
1771
1772 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1773 &xbridge->xports) {
1774 if (xport->ofp_port == ofp_port) {
1775 return xport;
1776 }
1777 }
1778 return NULL;
1779 }
1780
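/* Translates OpenFlow port 'ofp_port' on 'xbridge' into the corresponding
 * datapath port number, or ODPP_NONE if there is no such port. */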
1781 static odp_port_t
1782 ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1783 {
1784 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1785 return xport ? xport->odp_port : ODPP_NONE;
1786 }
1787
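/* Returns true if OpenFlow port 'ofp_port' exists on the ctx bridge and is
 * considered live, i.e. its 'may_enable' flag is set. */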
1788 static bool
1789 odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1790 {
1791 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1792 return xport && xport->may_enable;
1793 }
1794
1795 static struct ofputil_bucket *
1796 group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1797 int depth);
1798
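/* Returns true if group 'group_id' exists in the current tables version and
 * has at least one live bucket. */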
1799 static bool
1800 group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1801 {
1802 struct group_dpif *group;
1803
1804 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1805 ctx->xin->tables_version, false);
1806 if (group) {
1807 return group_first_live_bucket(ctx, group, depth) != NULL;
1808 }
1809
1810 return false;
1811 }
1812
1813 #define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1814
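/* Returns true if 'bucket' may be used, that is, if it has no liveness
 * requirement or if its watch port or watch group is live. Liveness checks
 * that chain through groups are limited to MAX_LIVENESS_RECURSION levels. */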
1815 static bool
1816 bucket_is_alive(const struct xlate_ctx *ctx,
1817 struct ofputil_bucket *bucket, int depth)
1818 {
1819 if (depth >= MAX_LIVENESS_RECURSION) {
1820 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1821 MAX_LIVENESS_RECURSION);
1822 return false;
1823 }
1824
1825 return (!ofputil_bucket_has_liveness(bucket)
1826 || (bucket->watch_port != OFPP_ANY
1827 && odp_port_is_alive(ctx, bucket->watch_port))
1828 || (bucket->watch_group != OFPG_ANY
1829 && group_is_alive(ctx, bucket->watch_group, depth + 1)));
1830 }
1831
1832 static void
1833 xlate_report_bucket_not_live(const struct xlate_ctx *ctx,
1834 const struct ofputil_bucket *bucket)
1835 {
1836 if (OVS_UNLIKELY(ctx->xin->trace)) {
1837 struct ds s = DS_EMPTY_INITIALIZER;
1838 if (bucket->watch_port != OFPP_ANY) {
1839 ds_put_cstr(&s, "port ");
1840 ofputil_format_port(bucket->watch_port, NULL, &s);
1841 }
1842 if (bucket->watch_group != OFPG_ANY) {
1843 if (s.length) {
1844 ds_put_cstr(&s, " and ");
1845 }
1846 ds_put_format(&s, "group %"PRIu32, bucket->watch_group);
1847 }
1848
1849 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": not live due to %s",
1850 bucket->bucket_id, ds_cstr(&s));
1851
1852 ds_destroy(&s);
1853 }
1854 }
1855
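/* Returns the first live bucket in 'group', or NULL if no bucket is live. */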
1856 static struct ofputil_bucket *
1857 group_first_live_bucket(const struct xlate_ctx *ctx,
1858 const struct group_dpif *group, int depth)
1859 {
1860 struct ofputil_bucket *bucket;
1861 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1862 if (bucket_is_alive(ctx, bucket, depth)) {
1863 return bucket;
1864 }
1865 xlate_report_bucket_not_live(ctx, bucket);
1866 }
1867
1868 return NULL;
1869 }
1870
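/* Returns the live bucket in 'group' with the best score for hash 'basis',
 * where each bucket's score is a hash of its bucket ID scaled by its weight,
 * or NULL if no bucket is live. */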
1871 static struct ofputil_bucket *
1872 group_best_live_bucket(const struct xlate_ctx *ctx,
1873 const struct group_dpif *group,
1874 uint32_t basis)
1875 {
1876 struct ofputil_bucket *best_bucket = NULL;
1877 uint32_t best_score = 0;
1878
1879 struct ofputil_bucket *bucket;
1880 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1881 if (bucket_is_alive(ctx, bucket, 0)) {
1882 uint32_t score =
1883 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
1884 if (score >= best_score) {
1885 best_bucket = bucket;
1886 best_score = score;
1887 }
1888 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": score %"PRIu32,
1889 bucket->bucket_id, score);
1890 } else {
1891 xlate_report_bucket_not_live(ctx, bucket);
1892 }
1893 }
1894
1895 return best_bucket;
1896 }
1897
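/* Returns true if 'bundle' is not an access port and trunks 'vlan' (a null
 * 'trunks' bitmap means that all VLANs are trunked). */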
1898 static bool
1899 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
1900 {
1901 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1902 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1903 }
1904
1905 static bool
1906 xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1907 {
1908 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1909 }
1910
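/* Returns true if a packet with VLAN headers 'xvlan' may be sent or received
 * on 'xbundle', according to the bundle's VLAN mode. */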
1911 static bool
1912 xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
1913 {
1914 switch (xbundle->vlan_mode) {
1915 case PORT_VLAN_ACCESS:
1916 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
1917
1918 case PORT_VLAN_TRUNK:
1919 case PORT_VLAN_NATIVE_UNTAGGED:
1920 case PORT_VLAN_NATIVE_TAGGED:
1921 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
1922
1923 case PORT_VLAN_DOT1Q_TUNNEL:
1924 return xvlan->v[0].vid == xbundle->vlan &&
1925 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
1926
1927 default:
1928 OVS_NOT_REACHED();
1929 }
1930 }
1931
1932 static mirror_mask_t
1933 xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
1934 {
1935 return xbundle != &ofpp_none_bundle
1936 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
1937 : 0;
1938 }
1939
1940 static mirror_mask_t
1941 xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
1942 {
1943 return xbundle != &ofpp_none_bundle
1944 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
1945 : 0;
1946 }
1947
1948 static mirror_mask_t
1949 xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
1950 {
1951 return xbundle != &ofpp_none_bundle
1952 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
1953 : 0;
1954 }
1955
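/* Finds the bundle for input port 'in_port' on 'xbridge'. If 'in_xportp' is
 * nonnull, also stores the input xport (or NULL) into '*in_xportp'. Returns
 * the special ofpp_none_bundle for OFPP_NONE and OFPP_CONTROLLER, or NULL if
 * the port is unknown. */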
1956 static struct xbundle *
1957 lookup_input_bundle__(const struct xbridge *xbridge,
1958 ofp_port_t in_port, struct xport **in_xportp)
1959 {
1960 struct xport *xport;
1961
1962 /* Find the port and bundle for the received packet. */
1963 xport = get_ofp_port(xbridge, in_port);
1964 if (in_xportp) {
1965 *in_xportp = xport;
1966 }
1967 if (xport && xport->xbundle) {
1968 return xport->xbundle;
1969 }
1970
1971 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
1972 * which a controller may use as the ingress port for traffic that
1973 * it is sourcing. */
1974 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
1975 return &ofpp_none_bundle;
1976 }
1977 return NULL;
1978 }
1979
1980 static struct xbundle *
1981 lookup_input_bundle(const struct xlate_ctx *ctx,
1982 ofp_port_t in_port, struct xport **in_xportp)
1983 {
1984 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
1985 in_port, in_xportp);
1986 if (!xbundle) {
1987 /* Odd. A few possible reasons here:
1988 *
1989 * - We deleted a port but there are still a few packets queued up
1990 * from it.
1991 *
1992 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
1993 * we don't know about.
1994 *
1995 * - The ofproto client didn't configure the port as part of a bundle.
1996 * This is particularly likely to happen if a packet was received on
1997 * the port after it was created, but before the client had a chance
1998 * to configure its bundle.
1999 */
2000 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2001 in_port);
2002 }
2003 return xbundle;
2004 }
2005
2006 /* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
2007 * given the packet is ingressing or egressing on 'xbundle', which has ingress
2008 * or egress (as appropriate) mirrors 'mirrors'. */
2009 static void
2010 mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
2011 mirror_mask_t mirrors)
2012 {
2013 struct xvlan in_xvlan;
2014 struct xvlan xvlan;
2015
2016 /* Figure out what VLAN the packet is in (because mirrors can select
2017 * packets on the basis of VLAN). */
2018 xvlan_extract(&ctx->xin->flow, &in_xvlan);
2019 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
2020 return;
2021 }
2022 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
2023
2024 const struct xbridge *xbridge = ctx->xbridge;
2025
2026 /* Don't mirror to destinations that we've already mirrored to. */
2027 mirrors &= ~ctx->mirrors;
2028 if (!mirrors) {
2029 return;
2030 }
2031
2032 if (ctx->xin->resubmit_stats) {
2033 mirror_update_stats(xbridge->mbridge, mirrors,
2034 ctx->xin->resubmit_stats->n_packets,
2035 ctx->xin->resubmit_stats->n_bytes);
2036 }
2037 if (ctx->xin->xcache) {
2038 struct xc_entry *entry;
2039
2040 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
2041 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
2042 entry->mirror.mirrors = mirrors;
2043 }
2044
2045 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate as long as
2046 * some candidates remain. */
2047 while (mirrors) {
2048 const unsigned long *vlans;
2049 mirror_mask_t dup_mirrors;
2050 struct ofbundle *out;
2051 int out_vlan;
2052 int snaplen;
2053
2054 /* Get the details of the mirror represented by the rightmost 1-bit. */
2055 ovs_assert(mirror_get(xbridge->mbridge, raw_ctz(mirrors),
2056 &vlans, &dup_mirrors,
2057 &out, &snaplen, &out_vlan));
2058
2060 /* If this mirror selects on the basis of VLAN, and it does not select
2061 * 'vlan', then discard this mirror and go on to the next one. */
2062 if (vlans) {
2063 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
2064 }
2065 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
2066 mirrors = zero_rightmost_1bit(mirrors);
2067 continue;
2068 }
2069
2070 /* Record the mirror, and the mirrors that output to the same
2071 * destination, so that we don't mirror to them again. This must be
2072 * done now to ensure that output_normal(), below, doesn't recursively
2073 * output to the same mirrors. */
2074 ctx->mirrors |= dup_mirrors;
2075 ctx->mirror_snaplen = snaplen;
2076
2077 /* Send the packet to the mirror. */
2078 if (out) {
2079 struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
2080 if (out_xbundle) {
2081 output_normal(ctx, out_xbundle, &xvlan);
2082 }
2083 } else if (xvlan.v[0].vid != out_vlan
2084 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
2085 struct xbundle *xb;
2086 uint16_t old_vid = xvlan.v[0].vid;
2087
2088 xvlan.v[0].vid = out_vlan;
2089 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
2090 if (xbundle_includes_vlan(xb, &xvlan)
2091 && !xbundle_mirror_out(xbridge, xb)) {
2092 output_normal(ctx, xb, &xvlan);
2093 }
2094 }
2095 xvlan.v[0].vid = old_vid;
2096 }
2097
2098 /* output_normal() could have recursively output (to different
2099 * mirrors), so make sure that we don't send duplicates. */
2100 mirrors &= ~ctx->mirrors;
2101 ctx->mirror_snaplen = 0;
2102 }
2103 }
2104
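/* If the ctx bridge has any mirrors configured, mirrors the packet in 'ctx'
 * according to the ingress mirrors of its input bundle. */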
2105 static void
2106 mirror_ingress_packet(struct xlate_ctx *ctx)
2107 {
2108 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
2109 struct xbundle *xbundle = lookup_input_bundle(
2110 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
2111 if (xbundle) {
2112 mirror_packet(ctx, xbundle,
2113 xbundle_mirror_src(ctx->xbridge, xbundle));
2114 }
2115 }
2116 }
2117
2118 /* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2119 * If so, returns true. Otherwise, returns false.
2120 *
2121 * 'vid' should be the VID obtained from the 802.1Q header that was received as
2122 * part of a packet (specify 0 if there was no 802.1Q header), in the range
2123 * 0...4095. */
2124 static bool
2125 input_vid_is_valid(const struct xlate_ctx *ctx,
2126 uint16_t vid, struct xbundle *in_xbundle)
2127 {
2128 /* Allow any VID on the OFPP_NONE port. */
2129 if (in_xbundle == &ofpp_none_bundle) {
2130 return true;
2131 }
2132
2133 switch (in_xbundle->vlan_mode) {
2134 case PORT_VLAN_ACCESS:
2135 if (vid) {
2136 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
2137 "packet received on port %s configured as VLAN "
2138 "%d access port", vid, in_xbundle->name,
2139 in_xbundle->vlan);
2140 return false;
2141 }
2142 return true;
2143
2144 case PORT_VLAN_NATIVE_UNTAGGED:
2145 case PORT_VLAN_NATIVE_TAGGED:
2146 if (!vid) {
2147 /* Port must always carry its native VLAN. */
2148 return true;
2149 }
2150 /* Fall through. */
2151 case PORT_VLAN_TRUNK:
2152 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
2153 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
2154 "received on port %s not configured for "
2155 "trunking VLAN %"PRIu16,
2156 vid, in_xbundle->name, vid);
2157 return false;
2158 }
2159 return true;
2160
2161 case PORT_VLAN_DOT1Q_TUNNEL:
2162 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
2163 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
2164 "on dot1q-tunnel port %s that excludes this "
2165 "VLAN", vid, in_xbundle->name);
2166 return false;
2167 }
2168 return true;
2169
2170 default:
2171 OVS_NOT_REACHED();
2172 }
2174 }
2175
2176 static void
2177 xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2178 {
2179 *dst = *src;
2180 }
2181
2182 static void
2183 xvlan_pop(struct xvlan *src)
2184 {
2185 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2186 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2187 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2188 }
2189
2190 static void
2191 xvlan_push_uninit(struct xvlan *src)
2192 {
2193 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2194 memset(&src->v[0], 0, sizeof(src->v[0]));
2195 }
2196
2197 /* Extract VLAN information (headers) from flow */
2198 static void
2199 xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2200 {
2201 int i;
2202 memset(xvlan, 0, sizeof(*xvlan));
2203 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2204 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2205 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2206 break;
2207 }
2208 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2209 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2210 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2211 }
2212 }
2213
2214 /* Put VLAN information (headers) to flow */
2215 static void
2216 xvlan_put(struct flow *flow, const struct xvlan *xvlan)
2217 {
2218 ovs_be16 tci;
2219 int i;
2220 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2221 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
2222 if (tci) {
2223 tci |= htons(VLAN_CFI);
2224 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2225 htons(xvlan->v[i].tpid) :
2226 htons(ETH_TYPE_VLAN_8021Q);
2227 }
2228 flow->vlans[i].tci = tci;
2229 }
2230 }
2231
2232 /* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2233 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2234 * returns the VLANs of the packet during bridge internal processing. */
2235 static void
2236 xvlan_input_translate(const struct xbundle *in_xbundle,
2237 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2238 {
2240 switch (in_xbundle->vlan_mode) {
2241 case PORT_VLAN_ACCESS:
2242 memset(xvlan, 0, sizeof(*xvlan));
2243 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2244 ETH_TYPE_VLAN_8021Q;
2245 xvlan->v[0].vid = in_xbundle->vlan;
2246 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2247 break;
2248
2249 case PORT_VLAN_TRUNK:
2250 xvlan_copy(xvlan, in_xvlan);
2251 break;
2252
2253 case PORT_VLAN_NATIVE_UNTAGGED:
2254 case PORT_VLAN_NATIVE_TAGGED:
2255 xvlan_copy(xvlan, in_xvlan);
2256 if (!in_xvlan->v[0].vid) {
2257 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2258 ETH_TYPE_VLAN_8021Q;
2259 xvlan->v[0].vid = in_xbundle->vlan;
2260 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2261 }
2262 break;
2263
2264 case PORT_VLAN_DOT1Q_TUNNEL:
2265 xvlan_copy(xvlan, in_xvlan);
2266 xvlan_push_uninit(xvlan);
2267 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2268 xvlan->v[0].vid = in_xbundle->vlan;
2269 xvlan->v[0].pcp = 0;
2270 break;
2271
2272 default:
2273 OVS_NOT_REACHED();
2274 }
2275 }
2276
2277 /* Given 'xvlan', the VLANs of a packet during internal processing, and
2278 * 'out_xbundle', a bundle on which the packet is to be output, returns the
2279 * VLANs that should be included in output packet. */
2280 static void
2281 xvlan_output_translate(const struct xbundle *out_xbundle,
2282 const struct xvlan *xvlan, struct xvlan *out_xvlan)
2283 {
2284 switch (out_xbundle->vlan_mode) {
2285 case PORT_VLAN_ACCESS:
2286 memset(out_xvlan, 0, sizeof(*out_xvlan));
2287 break;
2288
2289 case PORT_VLAN_TRUNK:
2290 case PORT_VLAN_NATIVE_TAGGED:
2291 xvlan_copy(out_xvlan, xvlan);
2292 break;
2293
2294 case PORT_VLAN_NATIVE_UNTAGGED:
2295 xvlan_copy(out_xvlan, xvlan);
2296 if (xvlan->v[0].vid == out_xbundle->vlan) {
2297 xvlan_pop(out_xvlan);
2298 }
2299 break;
2300
2301 case PORT_VLAN_DOT1Q_TUNNEL:
2302 xvlan_copy(out_xvlan, xvlan);
2303 xvlan_pop(out_xvlan);
2304 break;
2305
2306 default:
2307 OVS_NOT_REACHED();
2308 }
2309 }
2310
2311 /* If output xbundle is dot1q-tunnel, set mask bits of cvlan */
2312 static void
2313 check_and_set_cvlan_mask(struct flow_wildcards *wc,
2314 const struct xbundle *xbundle)
2315 {
2316 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2317 wc->masks.vlans[1].tci = htons(0xffff);
2318 }
2319 }
2320
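/* Outputs the packet in 'ctx' on 'out_xbundle', carrying VLAN headers
 * 'xvlan' translated to the bundle's VLAN mode. For bonded bundles this
 * chooses an output member (or sets up recirculation when the datapath
 * supports it) and accounts the packet to the bond. */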
2321 static void
2322 output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
2323 const struct xvlan *xvlan)
2324 {
2325 uint16_t vid;
2326 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
2327 struct xport *xport;
2328 struct xlate_bond_recirc xr;
2329 bool use_recirc = false;
2330 struct xvlan out_xvlan;
2331
2332 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2333
2334 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2335 if (out_xbundle->use_priority_tags) {
2336 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2337 VLAN_PCP_MASK;
2338 }
2339 vid = out_xvlan.v[0].vid;
2340 if (ovs_list_is_empty(&out_xbundle->xports)) {
2341 /* Partially configured bundle with no slaves. Drop the packet. */
2342 return;
2343 } else if (!out_xbundle->bond) {
2344 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
2345 bundle_node);
2346 } else {
2347 struct flow_wildcards *wc = ctx->wc;
2348 struct ofport_dpif *ofport;
2349
2350 if (ctx->xbridge->support.odp.recirc) {
2351 /* If recirculation is not actually in use, 'xr.recirc_id'
2352 * will be set to '0', since a valid 'recirc_id' cannot
2353 * be zero. */
2354 bond_update_post_recirc_rules(out_xbundle->bond,
2355 &xr.recirc_id,
2356 &xr.hash_basis);
2357 if (xr.recirc_id) {
2358 /* Use recirculation instead of output. */
2359 use_recirc = true;
2360 xr.hash_alg = OVS_HASH_ALG_L4;
2361 /* Recirculation does not require unmasking hash fields. */
2362 wc = NULL;
2363 }
2364 }
2365
2366 ofport = bond_choose_output_slave(out_xbundle->bond,
2367 &ctx->xin->flow, wc, vid);
2368 xport = xport_lookup(ctx->xcfg, ofport);
2369
2370 if (!xport) {
2371 /* No slaves enabled, so drop packet. */
2372 return;
2373 }
2374
2375 /* If use_recirc is set, the main thread will handle stats
2376 * accounting for this bond. */
2377 if (!use_recirc) {
2378 if (ctx->xin->resubmit_stats) {
2379 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2380 ctx->xin->resubmit_stats->n_bytes);
2381 }
2382 if (ctx->xin->xcache) {
2383 struct xc_entry *entry;
2384 struct flow *flow;
2385
2386 flow = &ctx->xin->flow;
2387 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
2388 entry->bond.bond = bond_ref(out_xbundle->bond);
2389 entry->bond.flow = xmemdup(flow, sizeof *flow);
2390 entry->bond.vid = vid;
2391 }
2392 }
2393 }
2394
2395 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
2396 xvlan_put(&ctx->xin->flow, &out_xvlan);
2397
2398 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
2399 false, false);
2400 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
2401 }
2402
2403 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2404 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2405 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2406 static bool
2407 is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2408 {
2409 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2410 return false;
2411 }
2412
2413 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2414 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2415 return false;
2416 }
2417
2418 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2419 if (flow->nw_proto == ARP_OP_REPLY) {
2420 return true;
2421 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2422 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2423 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2424
2425 return flow->nw_src == flow->nw_dst;
2426 } else {
2427 return false;
2428 }
2429 }
2430
2431 /* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2432 * dropped. Returns true if they may be forwarded, false if they should be
2433 * dropped.
2434 *
2435 * 'in_port' must be the xport that corresponds to flow->in_port.
2436 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2437 *
2438 * 'vlan' must be the VLAN that corresponds to the packet's 802.1Q header on
2439 * 'in_port', as translated by xvlan_input_translate(). It must be a valid
2440 * VLAN for 'in_port', as checked by input_vid_is_valid(). */
2445 static bool
2446 is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2447 uint16_t vlan)
2448 {
2449 struct xbundle *in_xbundle = in_port->xbundle;
2450 const struct xbridge *xbridge = ctx->xbridge;
2451 struct flow *flow = &ctx->xin->flow;
2452
2453 /* Drop frames for reserved multicast addresses
2454 * only if forward_bpdu option is absent. */
2455 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2456 xlate_report(ctx, OFT_DETAIL,
2457 "packet has reserved destination MAC, dropping");
2458 return false;
2459 }
2460
2461 if (in_xbundle->bond) {
2462 struct mac_entry *mac;
2463
2464 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2465 flow->dl_dst)) {
2466 case BV_ACCEPT:
2467 break;
2468
2469 case BV_DROP:
2470 xlate_report(ctx, OFT_DETAIL,
2471 "bonding refused admissibility, dropping");
2472 return false;
2473
2474 case BV_DROP_IF_MOVED:
2475 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2476 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
2477 if (mac
2478 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
2479 && (!is_gratuitous_arp(flow, ctx->wc)
2480 || mac_entry_is_grat_arp_locked(mac))) {
2481 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2482 xlate_report(ctx, OFT_DETAIL,
2483 "SLB bond thinks this packet looped back, "
2484 "dropping");
2485 return false;
2486 }
2487 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2488 break;
2489 }
2490 }
2491
2492 return true;
2493 }
2494
2495 static bool
2496 update_learning_table__(const struct xbridge *xbridge,
2497 struct xbundle *in_xbundle, struct eth_addr dl_src,
2498 int vlan, bool is_grat_arp)
2499 {
2500 return (in_xbundle == &ofpp_none_bundle
2501 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2502 is_grat_arp,
2503 in_xbundle->bond != NULL,
2504 in_xbundle->ofbundle));
2505 }
2506
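/* Updates the MAC learning table with 'dl_src' seen on 'in_xbundle' in
 * 'vlan', logging a debug message when this changes the table. */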
2507 static void
2508 update_learning_table(const struct xlate_ctx *ctx,
2509 struct xbundle *in_xbundle, struct eth_addr dl_src,
2510 int vlan, bool is_grat_arp)
2511 {
2512 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2513 is_grat_arp)) {
2514 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2515 "on port %s in VLAN %d",
2516 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
2517 }
2518 }
2519
2520 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2521 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2522 static void
2523 update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
2524 const struct flow *flow,
2525 struct mcast_snooping *ms, int vlan,
2526 struct xbundle *in_xbundle,
2527 const struct dp_packet *packet)
2528 OVS_REQ_WRLOCK(ms->rwlock)
2529 {
2530 const struct igmp_header *igmp;
2531 int count;
2532 size_t offset;
2533 ovs_be32 ip4 = flow->igmp_group_ip4;
2534
2535 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2536 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2537 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2538 xlate_report_debug(ctx, OFT_DETAIL,
2539 "multicast snooping received bad IGMP "
2540 "checksum on port %s in VLAN %d",
2541 in_xbundle->name, vlan);
2542 return;
2543 }
2544
2545 switch (ntohs(flow->tp_src)) {
2546 case IGMP_HOST_MEMBERSHIP_REPORT:
2547 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2548 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2549 xlate_report_debug(ctx, OFT_DETAIL,
2550 "multicast snooping learned that "
2551 IP_FMT" is on port %s in VLAN %d",
2552 IP_ARGS(ip4), in_xbundle->name, vlan);
2553 }
2554 break;
2555 case IGMP_HOST_LEAVE_MESSAGE:
2556 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2557 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping leaving "
2558 IP_FMT" is on port %s in VLAN %d",
2559 IP_ARGS(ip4), in_xbundle->name, vlan);
2560 }
2561 break;
2562 case IGMP_HOST_MEMBERSHIP_QUERY:
2563 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2564 in_xbundle->ofbundle)) {
2565 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2566 "from "IP_FMT" is on port %s in VLAN %d",
2567 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
2568 }
2569 break;
2570 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2571 count = mcast_snooping_add_report(ms, packet, vlan,
2572 in_xbundle->ofbundle);
2573 if (count) {
2574 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2575 "%d addresses on port %s in VLAN %d",
2576 count, in_xbundle->name, vlan);
2577 }
2578 break;
2579 }
2580 }
2581
2582 static void
2583 update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
2584 const struct flow *flow,
2585 struct mcast_snooping *ms, int vlan,
2586 struct xbundle *in_xbundle,
2587 const struct dp_packet *packet)
2588 OVS_REQ_WRLOCK(ms->rwlock)
2589 {
2590 const struct mld_header *mld;
2591 int count;
2592 size_t offset;
2593
2594 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2595 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2596
2597 if (!mld ||
2598 packet_csum_upperlayer6(dp_packet_l3(packet),
2599 mld, IPPROTO_ICMPV6,
2600 dp_packet_l4_size(packet)) != 0) {
2601 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2602 "bad MLD checksum on port %s in VLAN %d",
2603 in_xbundle->name, vlan);
2604 return;
2605 }
2606
2607 switch (ntohs(flow->tp_src)) {
2608 case MLD_QUERY:
2609 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2610 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2611 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2612 "port %s in VLAN %d", in_xbundle->name, vlan);
2613 }
2614 break;
2615 case MLD_REPORT:
2616 case MLD_DONE:
2617 case MLD2_REPORT:
2618 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2619 if (count) {
2620 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2621 "%d addresses on port %s in VLAN %d",
2622 count, in_xbundle->name, vlan);
2623 }
2624 break;
2625 }
2626 }
2627
2628 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2629 * was received on 'in_xbundle' in 'vlan'. */
2630 static void
2631 update_mcast_snooping_table(const struct xlate_ctx *ctx,
2632 const struct flow *flow, int vlan,
2633 struct xbundle *in_xbundle,
2634 const struct dp_packet *packet)
2635 {
2636 struct mcast_snooping *ms = ctx->xbridge->ms;
2637 struct xbundle *mcast_xbundle;
2638 struct mcast_port_bundle *fport;
2639
2640 /* Don't learn the OFPP_NONE port. */
2641 if (in_xbundle == &ofpp_none_bundle) {
2642 return;
2643 }
2644
2645 /* Don't learn from flood ports */
2646 mcast_xbundle = NULL;
2647 ovs_rwlock_wrlock(&ms->rwlock);
2648 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2649 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2650 if (mcast_xbundle == in_xbundle) {
2651 break;
2652 }
2653 }
2654
2655 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
2656 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2657 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
2658 in_xbundle, packet);
2659 } else {
2660 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
2661 in_xbundle, packet);
2662 }
2663 }
2664 ovs_rwlock_unlock(&ms->rwlock);
2665 }
2666
2667 /* Sends the packet to ports where the multicast group has been learned. */
2668 static void
2669 xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2670 struct mcast_snooping *ms OVS_UNUSED,
2671 struct mcast_group *grp,
2672 struct xbundle *in_xbundle,
2673 const struct xvlan *xvlan)
2674 OVS_REQ_RDLOCK(ms->rwlock)
2675 {
2676 struct mcast_group_bundle *b;
2677 struct xbundle *mcast_xbundle;
2678
2679 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2680 mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
2681 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2682 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
2683 output_normal(ctx, mcast_xbundle, xvlan);
2684 } else if (!mcast_xbundle) {
2685 xlate_report(ctx, OFT_WARN,
2686 "mcast group port is unknown, dropping");
2687 } else {
2688 xlate_report(ctx, OFT_DETAIL,
2689 "mcast group port is input port, dropping");
2690 }
2691 }
2692 }
2693
2694 /* Sends the packet to ports connected to multicast routers. */
2695 static void
2696 xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2697 struct mcast_snooping *ms,
2698 struct xbundle *in_xbundle,
2699 const struct xvlan *xvlan)
2700 OVS_REQ_RDLOCK(ms->rwlock)
2701 {
2702 struct mcast_mrouter_bundle *mrouter;
2703 struct xbundle *mcast_xbundle;
2704
2705 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2706 mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
2707 if (mcast_xbundle && mcast_xbundle != in_xbundle
2708 && mrouter->vlan == xvlan->v[0].vid) {
2709 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
2710 output_normal(ctx, mcast_xbundle, xvlan);
2711 } else if (!mcast_xbundle) {
2712 xlate_report(ctx, OFT_WARN,
2713 "mcast router port is unknown, dropping");
2714 } else if (mrouter->vlan != xvlan->v[0].vid) {
2715 xlate_report(ctx, OFT_DETAIL,
2716 "mcast router is on another vlan, dropping");
2717 } else {
2718 xlate_report(ctx, OFT_DETAIL,
2719 "mcast router port is input port, dropping");
2720 }
2721 }
2722 }
2723
2724 /* Sends the packet to ports flagged for flooding. */
2725 static void
2726 xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2727 struct mcast_snooping *ms,
2728 struct xbundle *in_xbundle,
2729 const struct xvlan *xvlan)
2730 OVS_REQ_RDLOCK(ms->rwlock)
2731 {
2732 struct mcast_port_bundle *fport;
2733 struct xbundle *mcast_xbundle;
2734
2735 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2736 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2737 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2738 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
2739 output_normal(ctx, mcast_xbundle, xvlan);
2740 } else if (!mcast_xbundle) {
2741 xlate_report(ctx, OFT_WARN,
2742 "mcast flood port is unknown, dropping");
2743 } else {
2744 xlate_report(ctx, OFT_DETAIL,
2745 "mcast flood port is input port, dropping");
2746 }
2747 }
2748 }
2749
2750 /* Forwards IGMP/MLD Reports to ports configured to receive them. */
2751 static void
2752 xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2753 struct mcast_snooping *ms,
2754 struct xbundle *in_xbundle,
2755 const struct xvlan *xvlan)
2756 OVS_REQ_RDLOCK(ms->rwlock)
2757 {
2758 struct mcast_port_bundle *rport;
2759 struct xbundle *mcast_xbundle;
2760
2761 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2762 mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
2763 if (mcast_xbundle
2764 && mcast_xbundle != in_xbundle
2765 && mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
2766 xlate_report(ctx, OFT_DETAIL,
2767 "forwarding report to mcast flagged port");
2768 output_normal(ctx, mcast_xbundle, xvlan);
2769 } else if (!mcast_xbundle) {
2770 xlate_report(ctx, OFT_WARN,
2771 "mcast port is unknown, dropping the report");
2772 } else {
2773 xlate_report(ctx, OFT_DETAIL,
2774 "mcast port is input port, dropping the Report");
2775 }
2776 }
2777 }
2778
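/* Floods the packet in 'ctx' to every floodable bundle on the bridge that
 * includes 'xvlan', other than the input bundle and bundles that are
 * reserved as mirror outputs. */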
2779 static void
2780 xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2781 struct xvlan *xvlan)
2782 {
2783 struct xbundle *xbundle;
2784
2785 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2786 if (xbundle != in_xbundle
2787 && xbundle->ofbundle != in_xbundle->ofbundle
2788 && xbundle_includes_vlan(xbundle, xvlan)
2789 && xbundle->floodable
2790 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2791 output_normal(ctx, xbundle, xvlan);
2792 }
2793 }
2794 ctx->nf_output_iface = NF_OUT_FLOOD;
2795 }
2796
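/* Returns true if 'flow' is destined to a local multicast address
 * (224.0.0.0/24 for IPv4 or the all-hosts group for IPv6), unwildcarding the
 * destination address in 'wc'. */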
2797 static bool
2798 is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2799 {
2800 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2801 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2802 return ip_is_local_multicast(flow->nw_dst);
2803 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2804 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2805 return ipv6_is_all_hosts(&flow->ipv6_dst);
2806 } else {
2807 return false;
2808 }
2809 }
2810
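/* Implements the OFPP_NORMAL action: checks the input bundle and VLAN,
 * learns the source MAC, applies multicast snooping when it is enabled, and
 * outputs to the port learned for the destination MAC or floods. */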
2811 static void
2812 xlate_normal(struct xlate_ctx *ctx)
2813 {
2814 struct flow_wildcards *wc = ctx->wc;
2815 struct flow *flow = &ctx->xin->flow;
2816 struct xbundle *in_xbundle;
2817 struct xport *in_port;
2818 struct mac_entry *mac;
2819 void *mac_port;
2820 struct xvlan in_xvlan;
2821 struct xvlan xvlan;
2822 uint16_t vlan;
2823
2824 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2825 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2826 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
2827
2828 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
2829 if (!in_xbundle) {
2830 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
2831 return;
2832 }
2833
2834 /* Drop malformed frames. */
2835 if (eth_type_vlan(flow->dl_type) &&
2836 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
2837 if (ctx->xin->packet != NULL) {
2838 xlate_report_error(ctx, "dropping packet with partial "
2839 "VLAN tag received on port %s",
2840 in_xbundle->name);
2841 }
2842 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
2843 return;
2844 }
2845
2846 /* Drop frames on bundles reserved for mirroring. */
2847 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
2848 if (ctx->xin->packet != NULL) {
2849 xlate_report_error(ctx, "dropping packet received on port %s, "
2850 "which is reserved exclusively for mirroring",
2851 in_xbundle->name);
2852 }
2853 xlate_report(ctx, OFT_WARN,
2854 "input port is mirror output port, dropping");
2855 return;
2856 }
2857
2858 /* Check VLAN. */
2859 xvlan_extract(flow, &in_xvlan);
2860 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
2861 xlate_report(ctx, OFT_WARN,
2862 "disallowed VLAN VID for this input port, dropping");
2863 return;
2864 }
2865 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
2866 vlan = xvlan.v[0].vid;
2867
2868 /* Check other admissibility requirements. */
2869 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2870 return;
2871 }
2872
2873 /* Learn source MAC. */
2874 bool is_grat_arp = is_gratuitous_arp(flow, wc);
2875 if (ctx->xin->allow_side_effects
2876 && flow->packet_type == htonl(PT_ETH)
2877 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
2878 ) {
2879 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
2880 is_grat_arp);
2881 }
2882 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
2883 struct xc_entry *entry;
2884
2885 /* Save just enough info to update mac learning table later. */
2886 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
2887 entry->normal.ofproto = ctx->xbridge->ofproto;
2888 entry->normal.in_port = flow->in_port.ofp_port;
2889 entry->normal.dl_src = flow->dl_src;
2890 entry->normal.vlan = vlan;
2891 entry->normal.is_gratuitous_arp = is_grat_arp;
2892 }
2893
2894 /* Determine output bundle. */
2895 if (mcast_snooping_enabled(ctx->xbridge->ms)
2896 && !eth_addr_is_broadcast(flow->dl_dst)
2897 && eth_addr_is_multicast(flow->dl_dst)
2898 && is_ip_any(flow)) {
2899 struct mcast_snooping *ms = ctx->xbridge->ms;
2900 struct mcast_group *grp = NULL;
2901
2902 if (is_igmp(flow, wc)) {
2903 /*
2904 * IGMP packets need to take the slow path so that they are
2905 * processed for mdb updates. Otherwise group entries could
2906 * expire even though hosts keep sending reports.
2907 */
2908 ctx->xout->slow |= SLOW_ACTION;
2909
2910 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
2911 if (mcast_snooping_is_membership(flow->tp_src) ||
2912 mcast_snooping_is_query(flow->tp_src)) {
2913 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2914 update_mcast_snooping_table(ctx, flow, vlan,
2915 in_xbundle, ctx->xin->packet);
2916 }
2917 }
2918
2919 if (mcast_snooping_is_membership(flow->tp_src)) {
2920 ovs_rwlock_rdlock(&ms->rwlock);
2921 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2922 /* RFC4541: section 2.1.1, item 1: A snooping switch should
2923 * forward IGMP Membership Reports only to those ports where
2924 * multicast routers are attached. Alternatively stated: a
2925 * snooping switch should not forward IGMP Membership Reports
2926 * to ports on which only hosts are attached.
2927 * An administrative control may be provided to override this
2928 * restriction, allowing the report messages to be flooded to
2929 * other ports. */
2930 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
2931 ovs_rwlock_unlock(&ms->rwlock);
2932 } else {
2933 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
2934 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2935 }
2936 return;
2937 } else if (is_mld(flow, wc)) {
2938 ctx->xout->slow |= SLOW_ACTION;
2939 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
2940 update_mcast_snooping_table(ctx, flow, vlan,
2941 in_xbundle, ctx->xin->packet);
2942 }
2943 if (is_mld_report(flow, wc)) {
2944 ovs_rwlock_rdlock(&ms->rwlock);
2945 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2946 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &xvlan);
2947 ovs_rwlock_unlock(&ms->rwlock);
2948 } else {
2949 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
2950 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2951 }
2952 } else {
2953 if (is_ip_local_multicast(flow, wc)) {
2954 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
2955 * address in the 224.0.0.x range which are not IGMP must
2956 * be forwarded on all ports */
2957 xlate_report(ctx, OFT_DETAIL,
2958 "RFC4541: section 2.1.2, item 2, flooding");
2959 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2960 return;
2961 }
2962 }
2963
2964 /* Forward to the ports that have joined the multicast group, if known. */
2965 ovs_rwlock_rdlock(&ms->rwlock);
2966 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2967 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
2968 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2969 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
2970 }
2971 if (grp) {
2972 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &xvlan);
2973 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
2974 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2975 } else {
2976 if (mcast_snooping_flood_unreg(ms)) {
2977 xlate_report(ctx, OFT_DETAIL,
2978 "unregistered multicast, flooding");
2979 xlate_normal_flood(ctx, in_xbundle, &xvlan);
2980 } else {
2981 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan);
2982 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &xvlan);
2983 }
2984 }
2985 ovs_rwlock_unlock(&ms->rwlock);
2986 } else {
2987 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
2988 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
2989 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
2990 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
2991
2992 if (mac_port) {
2993 struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
2994 if (mac_xbundle
2995 && mac_xbundle != in_xbundle
2996 && mac_xbundle->ofbundle != in_xbundle->ofbundle) {
2997 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
2998 output_normal(ctx, mac_xbundle, &xvlan);
2999 } else if (!mac_xbundle) {
3000 xlate_report(ctx, OFT_WARN,
3001 "learned port is unknown, dropping");
3002 } else {
3003 xlate_report(ctx, OFT_DETAIL,
3004 "learned port is input port, dropping");
3005 }
3006 } else {
3007 xlate_report(ctx, OFT_DETAIL,
3008 "no learned MAC for destination, flooding");
3009 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3010 }
3011 }
3012 }
3013
3014 /* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
3015 * 'probability' is the number of packets out of UINT32_MAX to sample. The
3016 * 'cookie' is passed back in the callback for each sampled packet.
3017 * 'tunnel_out_port', if not ODPP_NONE, is added as the
3018 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions',
3019 * an OVS_USERSPACE_ATTR_ACTIONS attribute is added. If
3020 * 'emit_set_tunnel', sample(sampling_port=1) would translate into
3021 * datapath sample action set(tunnel(...)), sample(...) and it is used
3022 * for sampling egress tunnel information.
3023 */
3024 static size_t
3025 compose_sample_action(struct xlate_ctx *ctx,
3026 const uint32_t probability,
3027 const struct user_action_cookie *cookie,
3028 const odp_port_t tunnel_out_port,
3029 bool include_actions)
3030 {
3031 if (probability == 0) {
3032 /* No need to generate sampling or the inner action. */
3033 return 0;
3034 }
3035
3036 /* If the slow path meter is configured by the controller,
3037 * insert a meter action before the user space action. */
3038 struct ofproto *ofproto = &ctx->xin->ofproto->up;
3039 uint32_t meter_id = ofproto->slowpath_meter_id;
3040
3041 /* When a meter action is not required, avoid generating a sample action
3042 * for a 100% sampling rate. */
3043 bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
3044 size_t sample_offset, actions_offset;
3045 if (is_sample) {
3046 sample_offset = nl_msg_start_nested(ctx->odp_actions,
3047 OVS_ACTION_ATTR_SAMPLE);
3048 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
3049 probability);
3050 actions_offset = nl_msg_start_nested(ctx->odp_actions,
3051 OVS_SAMPLE_ATTR_ACTIONS);
3052 }
3053
3054 if (meter_id != UINT32_MAX) {
3055 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
3056 }
3057
3058 odp_port_t odp_port = ofp_port_to_odp_port(
3059 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
3060 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
3061 flow_hash_5tuple(&ctx->xin->flow, 0));
3062 size_t cookie_offset = odp_put_userspace_action(pid, cookie,
3063 sizeof *cookie,
3064 tunnel_out_port,
3065 include_actions,
3066 ctx->odp_actions);
3067
3068 if (is_sample) {
3069 nl_msg_end_nested(ctx->odp_actions, actions_offset);
3070 nl_msg_end_nested(ctx->odp_actions, sample_offset);
3071 }
3072
3073 return cookie_offset;
3074 }
3075
3076 /* If sFlow is not enabled, returns 0 without doing anything.
3077 *
3078 * If sFlow is enabled, appends a template "sample" action to the ODP actions
3079 * in 'ctx'. This action is a template because some of the information needed
3080 * to fill it out is not available until flow translation is complete. In this
3081 * case, this function returns an offset, which is always nonzero, to pass
3082 * later to fix_sflow_action() to fill in the rest of the template. */
3083 static size_t
3084 compose_sflow_action(struct xlate_ctx *ctx)
3085 {
3086 struct dpif_sflow *sflow = ctx->xbridge->sflow;
3087 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3088 return 0;
3089 }
3090
3091 struct user_action_cookie cookie = {
3092 .type = USER_ACTION_COOKIE_SFLOW,
3093 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
3094 .ofproto_uuid = ctx->xbridge->ofproto->uuid
3095 };
3096 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
3097 &cookie, ODPP_NONE, true);
3098 }
3099
3100 /* If flow IPFIX is enabled, makes sure that the IPFIX flow sample action
3101 * at the egress point of a tunnel port comes just in front of the
3102 * corresponding output action. If bridge IPFIX is enabled, this appends an
3103 * IPFIX sample action to 'ctx->odp_actions'. */
3104 static void
3105 compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
3106 {
3107 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
3108 odp_port_t tunnel_out_port = ODPP_NONE;
3109
3110 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3111 return;
3112 }
3113
3114 /* In the input case, 'output_odp_port' is ODPP_NONE, which is an invalid
3115 * port number. */
3116 if (output_odp_port == ODPP_NONE &&
3117 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
3118 return;
3119 }
3120
3121 /* In the output case, 'output_odp_port' is valid. */
3122 if (output_odp_port != ODPP_NONE) {
3123 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
3124 return;
3125 }
3126 /* If tunnel sampling is enabled, put an additional option attribute:
3127 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
3128 */
3129 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
3130 dpif_ipfix_is_tunnel_port(ipfix, output_odp_port) ) {
3131 tunnel_out_port = output_odp_port;
3132 }
3133 }
3134
3135 struct user_action_cookie cookie = {
3136 .type = USER_ACTION_COOKIE_IPFIX,
3137 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
3138 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
3139 .ipfix.output_odp_port = output_odp_port
3140 };
3141 compose_sample_action(ctx,
3142 dpif_ipfix_get_bridge_exporter_probability(ipfix),
3143 &cookie, tunnel_out_port, false);
3144 }
3145
3146 /* Fix "sample" action according to data collected while composing ODP actions,
3147 * as described in compose_sflow_action().
3148 *
3149 * 'user_cookie_offset' must be the offset returned by
3150 * compose_sflow_action(). */
3151 static void
3152 fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
3153 {
3154 const struct flow *base = &ctx->base_flow;
3155 struct user_action_cookie *cookie;
3156
3157 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
3158 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
3159
3160 cookie->sflow.vlan_tci = base->vlans[0].tci;
3161
3162 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
3163 * port information") for the interpretation of cookie->output. */
3164 switch (ctx->sflow_n_outputs) {
3165 case 0:
3166 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3167 cookie->sflow.output = 0x40000000 | 256;
3168 break;
3169
3170 case 1:
3171 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3172 ctx->xbridge->sflow, ctx->sflow_odp_port);
3173 if (cookie->sflow.output) {
3174 break;
3175 }
3176 /* Fall through. */
3177 default:
3178 /* 0x80000000 means "multiple output ports". */
3179 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3180 break;
3181 }
3182 }
3183
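/* Checks whether the packet in 'ctx' belongs to a control protocol (CFM,
 * BFD, LACP, STP/RSTP, or LLDP) that 'xport' handles itself. If so, hands
 * 'ctx->xin->packet' (when nonnull) to the protocol implementation, marks
 * the flow for the slow path, and returns true. */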
3184 static bool
3185 process_special(struct xlate_ctx *ctx, const struct xport *xport)
3186 {
3187 const struct flow *flow = &ctx->xin->flow;
3188 struct flow_wildcards *wc = ctx->wc;
3189 const struct xbridge *xbridge = ctx->xbridge;
3190 const struct dp_packet *packet = ctx->xin->packet;
3191 enum slow_path_reason slow;
3192
3193 if (!xport) {
3194 slow = 0;
3195 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
3196 if (packet) {
3197 cfm_process_heartbeat(xport->cfm, packet);
3198 }
3199 slow = SLOW_CFM;
3200 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
3201 if (packet) {
3202 bfd_process_packet(xport->bfd, flow, packet);
3203 /* If POLL received, immediately sends FINAL back. */
3204 if (bfd_should_send_packet(xport->bfd)) {
3205 ofproto_dpif_monitor_port_send_soon(xport->ofport);
3206 }
3207 }
3208 slow = SLOW_BFD;
3209 } else if (xport->xbundle && xport->xbundle->lacp
3210 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3211 if (packet) {
3212 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
3213 }
3214 slow = SLOW_LACP;
3215 } else if ((xbridge->stp || xbridge->rstp) &&
3216 stp_should_process_flow(flow, wc)) {
3217 if (packet) {
3218 xbridge->stp
3219 ? stp_process_packet(xport, packet)
3220 : rstp_process_packet(xport, packet);
3221 }
3222 slow = SLOW_STP;
3223 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
3224 if (packet) {
3225 lldp_process_packet(xport->lldp, packet);
3226 }
3227 slow = SLOW_LLDP;
3228 } else {
3229 slow = 0;
3230 }
3231
3232 if (slow) {
3233 ctx->xout->slow |= slow;
3234 return true;
3235 } else {
3236 return false;
3237 }
3238 }
3239
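/* Looks up a route for the tunnel destination in 'oflow'. On success,
 * stores the next hop (or the destination itself) in '*ip', the source
 * address in '*src', and the output port in '*out_port', and returns 0.
 * Returns -ENOENT if no route or output port is found. */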
3240 static int
3241 tnl_route_lookup_flow(const struct xlate_ctx *ctx,
3242 const struct flow *oflow,
3243 struct in6_addr *ip, struct in6_addr *src,
3244 struct xport **out_port)
3245 {
3246 char out_dev[IFNAMSIZ];
3247 struct xbridge *xbridge;
3248 struct in6_addr gw;
3249 struct in6_addr dst;
3250
3251 dst = flow_tnl_dst(&oflow->tunnel);
3252 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
3253 return -ENOENT;
3254 }
3255
3256 if (ipv6_addr_is_set(&gw) &&
3257 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
3258 *ip = gw;
3259 } else {
3260 *ip = dst;
3261 }
3262
3263 HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
3264 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3265 struct xport *port;
3266
3267 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3268 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3269 *out_port = port;
3270 return 0;
3271 }
3272 }
3273 }
3274 }
3275 return -ENOENT;
3276 }
3277
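/* Re-injects 'packet' into the OpenFlow pipeline of 'out_dev''s bridge by
 * executing an output to OFPP_TABLE with the packet's in_port set to
 * 'out_dev'. */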
3278 static int
3279 compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3280 struct dp_packet *packet)
3281 {
3282 struct xbridge *xbridge = out_dev->xbridge;
3283 struct ofpact_output output;
3284 struct flow flow;
3285
3286 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
3287 flow_extract(packet, &flow);
3288 flow.in_port.ofp_port = out_dev->ofp_port;
3289 output.port = OFPP_TABLE;
3290 output.max_len = 0;
3291
3292 return ofproto_dpif_execute_actions__(xbridge->ofproto,
3293 ctx->xin->tables_version, &flow,
3294 NULL, &output.ofpact, sizeof output,
3295 ctx->depth, ctx->resubmits, packet);
3296 }
3297
3298 static void
3299 tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3300 const struct eth_addr eth_src,
3301 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3302 {
3303 struct dp_packet packet;
3304
3305 dp_packet_init(&packet, 0);
3306 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
3307 compose_table_xlate(ctx, out_dev, &packet);
3308 dp_packet_uninit(&packet);
3309 }
3310
3311 static void
3312 tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3313 const struct eth_addr eth_src,
3314 ovs_be32 ip_src, ovs_be32 ip_dst)
3315 {
3316 struct dp_packet packet;
3317
3318 dp_packet_init(&packet, 0);
3319 compose_arp(&packet, ARP_OP_REQUEST,
3320 eth_src, eth_addr_zero, true, ip_src, ip_dst);
3321
3322 compose_table_xlate(ctx, out_dev, &packet);
3323 dp_packet_uninit(&packet);
3324 }
3325
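/* Fills the Ethernet, L3, and L4 fields of 'dst_flow' from 'src_flow''s
 * tunnel metadata, using 'dmac' and 'smac' as the Ethernet addresses and
 * falling back to 's_ip6'/'s_ip' for the source address when the tunnel does
 * not provide one. */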
3326 static void
3327 propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3328 const struct flow *src_flow,
3329 struct eth_addr dmac, struct eth_addr smac,
3330 struct in6_addr s_ip6, ovs_be32 s_ip,
3331 bool is_tnl_ipv6, uint8_t nw_proto)
3332 {
3333 dst_flow->dl_dst = dmac;
3334 dst_flow->dl_src = smac;
3335
3336 dst_flow->packet_type = htonl(PT_ETH);
3337 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3338 dst_flow->nw_src = src_flow->tunnel.ip_src;
3339 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3340 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3341
3342 dst_flow->nw_frag = 0; /* Tunnel packets are unfragmented. */
3343 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3344 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3345 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3346 dst_flow->tp_src = src_flow->tunnel.tp_src;
3347
3348 if (is_tnl_ipv6) {
3349 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3350 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3351 && !ipv6_mask_is_any(&s_ip6)) {
3352 dst_flow->ipv6_src = s_ip6;
3353 }
3354 } else {
3355 dst_flow->dl_type = htons(ETH_TYPE_IP);
3356 if (dst_flow->nw_src == 0 && s_ip) {
3357 dst_flow->nw_src = s_ip;
3358 }
3359 }
3360 dst_flow->nw_proto = nw_proto;
3361 }
3362
3363 /*
3364 * Populate the 'flow' and 'base_flow' L3 fields to do the post tunnel push
3365 * translations.
3366 */
3367 static void
3368 propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3369 struct eth_addr smac, struct in6_addr s_ip6,
3370 ovs_be32 s_ip, bool is_tnl_ipv6,
3371 enum ovs_vport_type tnl_type)
3372 {
3373 struct flow *base_flow, *flow;
3374 flow = &ctx->xin->flow;
3375 base_flow = &ctx->base_flow;
3376 uint8_t nw_proto = 0;
3377
3378 switch (tnl_type) {
3379 case OVS_VPORT_TYPE_GRE:
3380 case OVS_VPORT_TYPE_ERSPAN:
3381 case OVS_VPORT_TYPE_IP6ERSPAN:
3382 case OVS_VPORT_TYPE_IP6GRE:
3383 nw_proto = IPPROTO_GRE;
3384 break;
3385 case OVS_VPORT_TYPE_VXLAN:
3386 case OVS_VPORT_TYPE_GENEVE:
3387 nw_proto = IPPROTO_UDP;
3388 break;
3389 case OVS_VPORT_TYPE_LISP:
3390 case OVS_VPORT_TYPE_STT:
3391 case OVS_VPORT_TYPE_UNSPEC:
3392 case OVS_VPORT_TYPE_NETDEV:
3393 case OVS_VPORT_TYPE_INTERNAL:
3394 case __OVS_VPORT_TYPE_MAX:
3395 default:
3396 OVS_NOT_REACHED();
3397 }
3398 /*
3399  * Update 'base_flow' first and 'flow' second, because the helper modifies
3400  * its 'dst_flow' argument and 'flow' is the source for both calls.
3401  */
3402 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3403 is_tnl_ipv6, nw_proto);
3404 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3405 is_tnl_ipv6, nw_proto);
3406 }
3407
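/* Outputs to a native (userspace-terminated) tunnel: resolves the route and
 * the neighbor for the tunnel destination (sending an ARP or ND request on a
 * cache miss), builds the tunnel push header and emits it inside a datapath
 * clone action; unless 'truncate' is set, it then translates the
 * post-encapsulation forwarding on the underlay bridge within that clone,
 * otherwise it recirculates.  Returns 0 on success or an error code. */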
3408 static int
3409 native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
3410 const struct flow *flow, odp_port_t tunnel_odp_port,
3411 bool truncate)
3412 {
3413 struct netdev_tnl_build_header_params tnl_params;
3414 struct ovs_action_push_tnl tnl_push_data;
3415 struct xport *out_dev = NULL;
3416 ovs_be32 s_ip = 0, d_ip = 0;
3417 struct in6_addr s_ip6 = in6addr_any;
3418 struct in6_addr d_ip6 = in6addr_any;
3419 struct eth_addr smac;
3420 struct eth_addr dmac;
3421 int err;
3422 char buf_sip6[INET6_ADDRSTRLEN];
3423 char buf_dip6[INET6_ADDRSTRLEN];
3424
3425 /* Store sFlow data. */
3426 uint32_t sflow_n_outputs = ctx->sflow_n_outputs;
3427
3428 /* Structures to backup Ethernet and IP of base_flow. */
3429 struct flow old_base_flow;
3430 struct flow old_flow;
3431
3432 /* Backup flow & base_flow data. */
3433 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3434 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3435
3436 if (flow->tunnel.ip_src) {
3437 in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
3438 }
3439
3440 err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
3441 if (err) {
3442 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
3443 return err;
3444 }
3445
3446 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
3447 ipv6_string_mapped(buf_dip6, &d_ip6),
3448 netdev_get_name(out_dev->netdev));
3449
3450 /* Use the MAC address of the bridge port of the peer. */
3451 err = netdev_get_etheraddr(out_dev->netdev, &smac);
3452 if (err) {
3453 xlate_report(ctx, OFT_WARN,
3454 "tunnel output device lacks Ethernet address");
3455 return err;
3456 }
3457
3458 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3459 if (d_ip) {
3460 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
3461 }
3462
3463 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
3464 if (err) {
3465 xlate_report(ctx, OFT_DETAIL,
3466 "neighbor cache miss for %s on bridge %s, "
3467 "sending %s request",
3468 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3469 if (d_ip) {
3470 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3471 } else {
3472 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3473 }
3474 return err;
3475 }
3476
3477 if (ctx->xin->xcache) {
3478 struct xc_entry *entry;
3479
3480 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
3481 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3482 sizeof entry->tnl_neigh_cache.br_name);
3483 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
3484 }
3485
3486 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
3487 " to "ETH_ADDR_FMT" %s",
3488 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3489 ETH_ADDR_ARGS(dmac), buf_dip6);
3490
3491 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3492 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
3493 if (err) {
3494 return err;
3495 }
3496 tnl_push_data.tnl_port = tunnel_odp_port;
3497 tnl_push_data.out_port = out_dev->odp_port;
3498
3499 /* After the tunnel header has been added, the MAC and IP data of 'flow'
3500  * and 'base_flow' need to be set properly, since there is no recirculation
3501  * anymore when sending the packet to the tunnel. */
3502
3503 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
3504 s_ip, tnl_params.is_ipv6,
3505 tnl_push_data.tnl_type);
3506
3507 size_t clone_ofs = 0;
3508 size_t push_action_size;
3509
3510 clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
3511 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3512 push_action_size = ctx->odp_actions->size;
3513
3514 if (!truncate) {
3515 const struct dpif_flow_stats *backup_resubmit_stats;
3516 struct xlate_cache *backup_xcache;
3517 struct flow_wildcards *backup_wc, wc;
3518 bool backup_side_effects;
3519 const struct dp_packet *backup_packet;
3520
3521 memset(&wc, 0, sizeof wc);
3522 backup_wc = ctx->wc;
3523 ctx->wc = &wc;
3524 ctx->xin->wc = NULL;
3525 backup_resubmit_stats = ctx->xin->resubmit_stats;
3526 backup_xcache = ctx->xin->xcache;
3527 backup_side_effects = ctx->xin->allow_side_effects;
3528 backup_packet = ctx->xin->packet;
3529
3530 ctx->xin->resubmit_stats = NULL;
3531 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3532 ctx->xin->allow_side_effects = false;
3533 ctx->xin->packet = NULL;
3534
3535 /* Push the cache entry for the tunnel first. */
3536 struct xc_entry *entry;
3537 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3538 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3539 entry->tunnel_hdr.operation = ADD;
3540
3541 patch_port_output(ctx, xport, out_dev);
3542
3543 /* Similar to the stats update in revalidation, the xcache entries
3544  * populated by the previous translation are used to update the
3545  * stats correctly.
3546  */
3547 if (backup_resubmit_stats) {
3548 struct dpif_flow_stats stats = *backup_resubmit_stats;
3549 xlate_push_stats(ctx->xin->xcache, &stats);
3550 }
3551 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3552
3553 if (ctx->odp_actions->size > push_action_size) {
3554 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
3555 } else {
3556 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3557 /* XXX: There is no real use case for a tunnel push without
3558  * any post actions.  However, keep it as is for now to make
3559  * 'make check' happy.  This should be removed once all the
3560  * 'make check' tunnel test cases do something meaningful with
3561  * tunnel-encapsulated packets.
3562  */
3563 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3564 }
3565
3566 /* Restore context status. */
3567 ctx->xin->resubmit_stats = backup_resubmit_stats;
3568 xlate_cache_delete(ctx->xin->xcache);
3569 ctx->xin->xcache = backup_xcache;
3570 ctx->xin->allow_side_effects = backup_side_effects;
3571 ctx->xin->packet = backup_packet;
3572 ctx->wc = backup_wc;
3573 } else {
3574 /* In order to maintain accurate stats, use recirc for
3575  * native tunneling. */
3576 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3577 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3578 }
3579
3580 /* Restore the flows after the translation. */
3581 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3582 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
3583
3584 /* Restore sFlow data. */
3585 ctx->sflow_n_outputs = sflow_n_outputs;
3586
3587 return 0;
3588 }
3589
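/* Commits any accumulated changes to the flow (relative to 'base_flow') as
 * datapath set/push/pop actions on 'ctx->odp_actions', including any pending
 * packet encap or decap, so that subsequent output actions see the packet as
 * modified so far. */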
3590 static void
3591 xlate_commit_actions(struct xlate_ctx *ctx)
3592 {
3593 bool use_masked = ctx->xbridge->support.masked_set_action;
3594
3595 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3596 ctx->odp_actions, ctx->wc,
3597 use_masked, ctx->pending_encap,
3598 ctx->pending_decap, ctx->encap_data);
3599 ctx->pending_encap = false;
3600 ctx->pending_decap = false;
3601 ofpbuf_delete(ctx->encap_data);
3602 ctx->encap_data = NULL;
3603 }
3604
3605 static void
3606 clear_conntrack(struct xlate_ctx *ctx)
3607 {
3608 ctx->conntracked = false;
3609 flow_clear_conntrack(&ctx->xin->flow);
3610 }
3611
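/* Returns true if both the input port of 'flow' and the output port
 * 'xport_out' belong to bundles configured as protected, in which case
 * traffic between them must not be forwarded. */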
3612 static bool
3613 xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3614 {
3615 const struct xport *xport_in;
3616
3617 if (!xport_out) {
3618 return false;
3619 }
3620
3621 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3622
3623 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3624 xport_in->xbundle->protected && xport_out->xbundle->protected);
3625 }
3626
3627 /* Handles the case in which a packet is sent from one bridge to another.
3628 *
3629 * The bridges are internally connected, either with patch ports or with
3630 * tunnel ports.
3631 *
3632 * The output action to another bridge causes translation to continue within
3633 * the next bridge. This process can be recursive; the next bridge can
3634  * output to yet another bridge.
3635 *
3636 * The translated actions from the second bridge onwards are enclosed within
3637 * the clone action, so that any modification to the packet will not be visible
3638 * to the remaining actions of the originating bridge.
3639 */
3640 static void
3641 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
3642 struct xport *out_dev)
3643 {
3644 struct flow *flow = &ctx->xin->flow;
3645 struct flow old_flow = ctx->xin->flow;
3646 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3647 bool old_conntrack = ctx->conntracked;
3648 bool old_was_mpls = ctx->was_mpls;
3649 ovs_version_t old_version = ctx->xin->tables_version;
3650 struct ofpbuf old_stack = ctx->stack;
3651 uint8_t new_stack[1024];
3652 struct ofpbuf old_action_set = ctx->action_set;
3653 struct ovs_list *old_trace = ctx->xin->trace;
3654 uint64_t actset_stub[1024 / 8];
3655
3656 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3657 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3658 flow->in_port.ofp_port = out_dev->ofp_port;
3659 flow->metadata = htonll(0);
3660 memset(&flow->tunnel, 0, sizeof flow->tunnel);
3661 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
3662 flow->tunnel.metadata.tab =
3663 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3664 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3665 memset(flow->regs, 0, sizeof flow->regs);
3666 flow->actset_output = OFPP_UNSET;
3667 clear_conntrack(ctx);
3668 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3669 out_dev->xbridge->name);
3670 mirror_mask_t old_mirrors = ctx->mirrors;
3671 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3672 if (independent_mirrors) {
3673 ctx->mirrors = 0;
3674 }
3675 ctx->xbridge = out_dev->xbridge;
3676
3677 /* The bridge is now known so obtain its table version. */
3678 ctx->xin->tables_version
3679 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3680
3681 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3682 if (xport_stp_forward_state(out_dev) &&
3683 xport_rstp_forward_state(out_dev)) {
3684 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3685 false, true, clone_xlate_actions);
3686 if (!ctx->freezing) {
3687 xlate_action_set(ctx);
3688 }
3689 if (ctx->freezing) {
3690 finish_freezing(ctx);
3691 }
3692 } else {
3693 /* Forwarding is disabled by STP or RSTP.  Let OFPP_NORMAL and
3694 * the learning action look at the packet, then drop it. */
3695 struct flow old_base_flow = ctx->base_flow;
3696 size_t old_size = ctx->odp_actions->size;
3697 mirror_mask_t old_mirrors2 = ctx->mirrors;
3698
3699 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3700 false, true, clone_xlate_actions);
3701 ctx->mirrors = old_mirrors2;
3702 ctx->base_flow = old_base_flow;
3703 ctx->odp_actions->size = old_size;
3704
3705 /* Undo changes that may have been done for freezing. */
3706 ctx_cancel_freeze(ctx);
3707 }
3708 }
3709
3710 ctx->xin->trace = old_trace;
3711 if (independent_mirrors) {
3712 ctx->mirrors = old_mirrors;
3713 }
3714 ctx->xin->flow = old_flow;
3715 ctx->xbridge = in_dev->xbridge;
3716 ofpbuf_uninit(&ctx->action_set);
3717 ctx->action_set = old_action_set;
3718 ofpbuf_uninit(&ctx->stack);
3719 ctx->stack = old_stack;
3720
3721 /* Restore calling bridge's lookup version. */
3722 ctx->xin->tables_version = old_version;
3723
3724 /* Restore the calling bridge's tunneling information. */
3725 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3726
3727 /* The out bridge popping MPLS should have no effect on the original
3728 * bridge. */
3729 ctx->was_mpls = old_was_mpls;
3730
3731 /* The out bridge's conntrack execution should have no effect on the
3732 * original bridge. */
3733 ctx->conntracked = old_conntrack;
3734
3735 /* The fact that the out bridge exits (for any reason) does not mean
3736 * that the original bridge should exit. Specifically, if the out
3737 * bridge freezes translation, the original bridge must continue
3738 * processing with the original, not the frozen packet! */
3739 ctx->exit = false;
3740
3741 /* Out bridge errors do not propagate back. */
3742 ctx->error = XLATE_OK;
3743
3744 if (ctx->xin->resubmit_stats) {
3745 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3746 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3747 if (out_dev->bfd) {
3748 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3749 }
3750 }
3751 if (ctx->xin->xcache) {
3752 struct xc_entry *entry;
3753
3754 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3755 entry->dev.tx = netdev_ref(in_dev->netdev);
3756 entry->dev.rx = netdev_ref(out_dev->netdev);
3757 entry->dev.bfd = bfd_ref(out_dev->bfd);
3758 }
3759 }
3760
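/* Checks whether output of 'flow' to 'xport' is allowed: the port must
 * exist, must not have OFPPC_NO_FWD set, must not be reached from another
 * protected port, and, when 'check_stp' is true, must be in an STP/RSTP
 * state that permits forwarding (BPDUs and CFM/BFD probes are special-cased).
 * Non-Ethernet packets are also rejected on legacy L2 ports.  Returns false,
 * after a trace report, if output must be skipped. */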
3761 static bool
3762 check_output_prerequisites(struct xlate_ctx *ctx,
3763 const struct xport *xport,
3764 struct flow *flow,
3765 bool check_stp)
3766 {
3767 struct flow_wildcards *wc = ctx->wc;
3768
3769 if (!xport) {
3770 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
3771 return false;
3772 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
3773 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
3774 return false;
3775 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
3776 xlate_report(ctx, OFT_WARN,
3777 "Mirror truncate to ODPP_NONE, skipping output");
3778 return false;
3779 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
3780 xlate_report(ctx, OFT_WARN,
3781 "Flow is between protected ports, skipping output.");
3782 return false;
3783 } else if (check_stp) {
3784 if (is_stp(&ctx->base_flow)) {
3785 if (!xport_stp_should_forward_bpdu(xport) &&
3786 !xport_rstp_should_manage_bpdu(xport)) {
3787 if (ctx->xbridge->stp != NULL) {
3788 xlate_report(ctx, OFT_WARN,
3789 "STP not in listening state, "
3790 "skipping bpdu output");
3791 } else if (ctx->xbridge->rstp != NULL) {
3792 xlate_report(ctx, OFT_WARN,
3793 "RSTP not managing BPDU in this state, "
3794 "skipping bpdu output");
3795 }
3796 return false;
3797 }
3798 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3799 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3800 wc))) {
3801 /* Pass; STP should not block link health detection. */
3802 } else if (!xport_stp_forward_state(xport) ||
3803 !xport_rstp_forward_state(xport)) {
3804 if (ctx->xbridge->stp != NULL) {
3805 xlate_report(ctx, OFT_WARN,
3806 "STP not in forwarding state, skipping output");
3807 } else if (ctx->xbridge->rstp != NULL) {
3808 xlate_report(ctx, OFT_WARN,
3809 "RSTP not in forwarding state, skipping output");
3810 }
3811 return false;
3812 }
3813 }
3814
3815 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3816 flow->packet_type != htonl(PT_ETH)) {
3817 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
3818 "through legacy L2 port. Dropping packet.");
3819 return false;
3820 }
3821
3822 return true;
3823 }
3824
3825 /* Verifies that the destination address of the Neighbor Advertisement
3826  * message stored in 'flow' is correct.  It should either be the
3827  * solicited-node multicast address FF02::1:FFXX:XXXX, where XX:XXXX stands
3828  * for the last 24 bits of 'ipv6_addr', or match 'ipv6_addr' itself. */
3829 static bool
3830 is_nd_dst_correct(const struct flow *flow, const struct in6_addr *ipv6_addr)
3831 {
3832 const uint8_t *flow_ipv6_addr = (uint8_t *) &flow->ipv6_dst;
3833 const uint8_t *addr = (uint8_t *) ipv6_addr;
3834
3835 return (IN6_IS_ADDR_MC_LINKLOCAL(flow_ipv6_addr) &&
3836 flow_ipv6_addr[11] == 0x01 &&
3837 flow_ipv6_addr[12] == 0xff &&
3838 flow_ipv6_addr[13] == addr[13] &&
3839 flow_ipv6_addr[14] == addr[14] &&
3840 flow_ipv6_addr[15] == addr[15]) ||
3841 IN6_ARE_ADDR_EQUAL(&flow->ipv6_dst, ipv6_addr);
3842 }
3843
3844 /* Verifies that the ARP reply or Neighbor Advertisement represented by
3845  * 'flow' addresses the 'xbridge' of 'ctx'.  Returns true if the ARP target
3846  * address or the neighbor discovery destination is in the list of IP
3847  * addresses configured on the bridge; otherwise returns false. */
3848 static bool
3849 is_neighbor_reply_correct(const struct xlate_ctx *ctx, const struct flow *flow)
3850 {
3851 bool ret = false;
3852 int i;
3853 struct xbridge_addr *xbridge_addr = xbridge_addr_ref(ctx->xbridge->addr);
3854
3855 /* Verify if 'nw_dst' of ARP or 'ipv6_dst' of ICMPV6 is in the list. */
3856 for (i = 0; xbridge_addr && i < xbridge_addr->n_addr; i++) {
3857 struct in6_addr *ip_addr = &xbridge_addr->addr[i];
3858 if ((IN6_IS_ADDR_V4MAPPED(ip_addr) &&
3859 flow->dl_type == htons(ETH_TYPE_ARP) &&
3860 in6_addr_get_mapped_ipv4(ip_addr) == flow->nw_dst) ||
3861 (!IN6_IS_ADDR_V4MAPPED(ip_addr) &&
3862 is_nd_dst_correct(flow, ip_addr))) {
3863 /* Found a match. */
3864 ret = true;
3865 break;
3866 }
3867 }
3868
3869 xbridge_addr_unref(xbridge_addr);
3870 return ret;
3871 }
3872
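/* Checks whether a packet output to OFPP_LOCAL should instead terminate a
 * native tunnel.  If the flow matches a tunnel port, stores the datapath port
 * of that tunnel in '*tnl_port' and returns true.  Otherwise, if the packet
 * is an ARP or ICMPv6 neighbor reply addressed to this bridge, snoops it to
 * populate the tunnel neighbor cache, and returns false. */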
3873 static bool
3874 terminate_native_tunnel(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3875 struct flow *flow, struct flow_wildcards *wc,
3876 odp_port_t *tnl_port)
3877 {
3878 *tnl_port = ODPP_NONE;
3879
3880 /* XXX: Write a better filter for the tunnel port.  We could use the
3881  * in_port in the tunnel-port flow to avoid these checks completely. */
3882 if (ofp_port == OFPP_LOCAL &&
3883 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3884 *tnl_port = tnl_port_map_lookup(flow, wc);
3885
3886 /* If no tunnel port was found and it's about an ARP or ICMPv6 packet,
3887 * do tunnel neighbor snooping. */
3888 if (*tnl_port == ODPP_NONE &&
3889 (flow->dl_type == htons(ETH_TYPE_ARP) ||
3890 flow->nw_proto == IPPROTO_ICMPV6) &&
3891 is_neighbor_reply_correct(ctx, flow)) {
3892 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
3893 }
3894 }
3895
3896 return *tnl_port != ODPP_NONE;
3897 }
3898
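/* Composes the datapath actions for outputting 'ctx->xin->flow' to OpenFlow
 * port 'ofp_port': forwards across patch ports, handles kernel and native
 * tunnel encapsulation, tunnel termination on OFPP_LOCAL, bond recirculation
 * via 'xr', QoS DSCP rewriting, mirroring, and optional truncation.  The flow
 * fields modified along the way are restored before returning. */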
3899 static void
3900 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
3901 const struct xlate_bond_recirc *xr, bool check_stp,
3902 bool is_last_action OVS_UNUSED, bool truncate)
3903 {
3904 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
3905 struct flow_wildcards *wc = ctx->wc;
3906 struct flow *flow = &ctx->xin->flow;
3907 struct flow_tnl flow_tnl;
3908 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
3909 uint8_t flow_nw_tos;
3910 odp_port_t out_port, odp_port, odp_tnl_port;
3911 bool is_native_tunnel = false;
3912 uint8_t dscp;
3913 struct eth_addr flow_dl_dst = flow->dl_dst;
3914 struct eth_addr flow_dl_src = flow->dl_src;
3915 ovs_be32 flow_packet_type = flow->packet_type;
3916 ovs_be16 flow_dl_type = flow->dl_type;
3917
3918 /* If 'struct flow' gets additional metadata, we'll need to zero it out
3919 * before traversing a patch port. */
3920 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 41);
3921 memset(&flow_tnl, 0, sizeof flow_tnl);
3922
3923 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
3924 return;
3925 }
3926
3927 if (flow->packet_type == htonl(PT_ETH)) {
3928 /* Strip Ethernet header for legacy L3 port. */
3929 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
3930 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
3931 ntohs(flow->dl_type));
3932 }
3933 }
3934
3935 if (xport->peer) {
3936 if (truncate) {
3937 xlate_report_error(ctx, "Cannot truncate output to patch port");
3938 }
3939 patch_port_output(ctx, xport, xport->peer);
3940 return;
3941 }
3942
3943 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
3944 flow_nw_tos = flow->nw_tos;
3945
3946 if (count_skb_priorities(xport)) {
3947 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3948 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
3949 wc->masks.nw_tos |= IP_DSCP_MASK;
3950 flow->nw_tos &= ~IP_DSCP_MASK;
3951 flow->nw_tos |= dscp;
3952 }
3953 }
3954
3955 if (xport->is_tunnel) {
3956 struct in6_addr dst;
3957 /* Save tunnel metadata so that changes made due to
3958  * the Logical (tunnel) Port are not visible to any further
3959 * matches, while explicit set actions on tunnel metadata are.
3960 */
3961 flow_tnl = flow->tunnel;
3962 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
3963 if (odp_port == ODPP_NONE) {
3964 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
3965 goto out; /* restore flow_nw_tos */
3966 }
3967 dst = flow_tnl_dst(&flow->tunnel);
3968 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
3969 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
3970 goto out; /* restore flow_nw_tos */
3971 }
3972 if (ctx->xin->resubmit_stats) {
3973 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
3974 }
3975 if (ctx->xin->xcache) {
3976 struct xc_entry *entry;
3977
3978 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3979 entry->dev.tx = netdev_ref(xport->netdev);
3980 }
3981 out_port = odp_port;
3982 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3983 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
3984 is_native_tunnel = true;
3985 } else {
3986 const char *tnl_type;
3987
3988 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
3989 tnl_type = tnl_port_get_type(xport->ofport);
3990 commit_odp_tunnel_action(flow, &ctx->base_flow,
3991 ctx->odp_actions, tnl_type);
3992 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
3993 }
3994 } else {
3995 odp_port = xport->odp_port;
3996 out_port = odp_port;
3997 }
3998
3999 if (out_port != ODPP_NONE) {
4000 /* Commit accumulated flow updates before output. */
4001 xlate_commit_actions(ctx);
4002
4003 if (xr) {
4004 /* Recirculate the packet. */
4005 struct ovs_action_hash *act_hash;
4006
4007 /* Hash action. */
4008 enum ovs_hash_alg hash_alg = xr->hash_alg;
4009 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4010 /* Algorithm supported by all datapaths. */
4011 hash_alg = OVS_HASH_ALG_L4;
4012 }
4013 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4014 OVS_ACTION_ATTR_HASH,
4015 sizeof *act_hash);
4016 act_hash->hash_alg = hash_alg;
4017 act_hash->hash_basis = xr->hash_basis;
4018
4019 /* Recirc action. */
4020 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
4021 xr->recirc_id);
4022 } else if (is_native_tunnel) {
4023 /* Output to native tunnel port. */
4024 native_tunnel_output(ctx, xport, flow, odp_port, truncate);
4025 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
4026
4027 } else if (terminate_native_tunnel(ctx, ofp_port, flow, wc,
4028 &odp_tnl_port)) {
4029 /* Intercept packet to be received on native tunnel port. */
4030 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
4031 odp_tnl_port);
4032
4033 } else {
4034 /* Tunnel push-pop action is not compatible with
4035 * IPFIX action. */
4036 compose_ipfix_action(ctx, out_port);
4037
4038 /* Handle truncation of the mirrored packet. */
4039 if (ctx->mirror_snaplen > 0 &&
4040 ctx->mirror_snaplen < UINT16_MAX) {
4041 struct ovs_action_trunc *trunc;
4042
4043 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4044 OVS_ACTION_ATTR_TRUNC,
4045 sizeof *trunc);
4046 trunc->max_len = ctx->mirror_snaplen;
4047 if (!ctx->xbridge->support.trunc) {
4048 ctx->xout->slow |= SLOW_ACTION;
4049 }
4050 }
4051
4052 nl_msg_put_odp_port(ctx->odp_actions,
4053 OVS_ACTION_ATTR_OUTPUT,
4054 out_port);
4055 }
4056
4057 ctx->sflow_odp_port = odp_port;
4058 ctx->sflow_n_outputs++;
4059 ctx->nf_output_iface = ofp_port;
4060 }
4061
4062 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
4063 mirror_packet(ctx, xport->xbundle,
4064 xbundle_mirror_dst(xport->xbundle->xbridge,
4065 xport->xbundle));
4066 }
4067
4068 out:
4069 /* Restore flow */
4070 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
4071 flow->nw_tos = flow_nw_tos;
4072 flow->dl_dst = flow_dl_dst;
4073 flow->dl_src = flow_dl_src;
4074 flow->packet_type = flow_packet_type;
4075 flow->dl_type = flow_dl_type;
4076 }
4077
4078 static void
4079 compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
4080 const struct xlate_bond_recirc *xr,
4081 bool is_last_action, bool truncate)
4082 {
4083 compose_output_action__(ctx, ofp_port, xr, true,
4084 is_last_action, truncate);
4085 }
4086
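/* Translates the actions of 'rule' as a nested translation, crediting the
 * rule's statistics when requested.  'deepens' is true when the resubmit goes
 * to the same or an earlier table, in which case it counts against the
 * maximum translation depth. */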
4087 static void
4088 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
4089 bool deepens, bool is_last_action,
4090 xlate_actions_handler *actions_xlator)
4091 {
4092 struct rule_dpif *old_rule = ctx->rule;
4093 ovs_be64 old_cookie = ctx->rule_cookie;
4094 const struct rule_actions *actions;
4095
4096 if (ctx->xin->resubmit_stats) {
4097 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
4098 }
4099
4100 ctx->resubmits++;
4101
4102 ctx->depth += deepens;
4103 ctx->rule = rule;
4104 ctx->rule_cookie = rule->up.flow_cookie;
4105 actions = rule_get_actions(&rule->up);
4106 actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
4107 is_last_action);
4108 ctx->rule_cookie = old_cookie;
4109 ctx->rule = old_rule;
4110 ctx->depth -= deepens;
4111 }
4112
4113 static bool
4114 xlate_resubmit_resource_check(struct xlate_ctx *ctx)
4115 {
4116 if (ctx->depth >= MAX_DEPTH) {
4117 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
4118 ctx->error = XLATE_RECURSION_TOO_DEEP;
4119 } else if (ctx->resubmits >= MAX_RESUBMITS) {
4120 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
4121 ctx->error = XLATE_TOO_MANY_RESUBMITS;
4122 } else if (ctx->odp_actions->size > UINT16_MAX) {
4123 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
4124 /* NOT an error, as we'll be slow-pathing the flow in this case? */
4125 ctx->exit = true; /* XXX: translation still terminated! */
4126 } else if (ctx->stack.size >= 65536) {
4127 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
4128 ctx->error = XLATE_STACK_TOO_DEEP;
4129 } else {
4130 return true;
4131 }
4132
4133 return false;
4134 }
4135
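/* Swaps the packet 5-tuple in 'flow' (addresses, protocol, and ports) with
 * the conntrack original-direction 5-tuple stored in the ct_* fields, so that
 * a resubmit with 'ct' specified can perform the lookup on the original
 * tuple. */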
4136 static void
4137 tuple_swap_flow(struct flow *flow, bool ipv4)
4138 {
4139 uint8_t nw_proto = flow->nw_proto;
4140 flow->nw_proto = flow->ct_nw_proto;
4141 flow->ct_nw_proto = nw_proto;
4142
4143 if (ipv4) {
4144 ovs_be32 nw_src = flow->nw_src;
4145 flow->nw_src = flow->ct_nw_src;
4146 flow->ct_nw_src = nw_src;
4147
4148 ovs_be32 nw_dst = flow->nw_dst;
4149 flow->nw_dst = flow->ct_nw_dst;
4150 flow->ct_nw_dst = nw_dst;
4151 } else {
4152 struct in6_addr ipv6_src = flow->ipv6_src;
4153 flow->ipv6_src = flow->ct_ipv6_src;
4154 flow->ct_ipv6_src = ipv6_src;
4155
4156 struct in6_addr ipv6_dst = flow->ipv6_dst;
4157 flow->ipv6_dst = flow->ct_ipv6_dst;
4158 flow->ct_ipv6_dst = ipv6_dst;
4159 }
4160
4161 ovs_be16 tp_src = flow->tp_src;
4162 flow->tp_src = flow->ct_tp_src;
4163 flow->ct_tp_src = tp_src;
4164
4165 ovs_be16 tp_dst = flow->tp_dst;
4166 flow->tp_dst = flow->ct_tp_dst;
4167 flow->ct_tp_dst = tp_dst;
4168 }
4169
4170 static void
4171 tuple_swap(struct flow *flow, struct flow_wildcards *wc)
4172 {
4173 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
4174
4175 tuple_swap_flow(flow, ipv4);
4176 tuple_swap_flow(&wc->masks, ipv4);
4177 }
4178
4179 static void
4180 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
4181 bool may_packet_in, bool honor_table_miss,
4182 bool with_ct_orig, bool is_last_action,
4183 xlate_actions_handler *xlator)
4184 {
4185 /* Check if we need to recirculate before matching in a table. */
4186 if (ctx->was_mpls) {
4187 ctx_trigger_freeze(ctx);
4188 return;
4189 }
4190 if (xlate_resubmit_resource_check(ctx)) {
4191 uint8_t old_table_id = ctx->table_id;
4192 struct rule_dpif *rule;
4193
4194 ctx->table_id = table_id;
4195
4196 /* Swap packet fields with CT 5-tuple if requested. */
4197 if (with_ct_orig) {
4198 /* Do not swap if there is no CT tuple, or if key is not IP. */
4199 if (ctx->xin->flow.ct_nw_proto == 0 ||
4200 !is_ip_any(&ctx->xin->flow)) {
4201 xlate_report_error(ctx,
4202 "resubmit(ct) with non-tracked or non-IP packet!");
4203 return;
4204 }
4205 tuple_swap(&ctx->xin->flow, ctx->wc);
4206 }
4207 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
4208 ctx->xin->tables_version,
4209 &ctx->xin->flow, ctx->wc,
4210 ctx->xin->resubmit_stats,
4211 &ctx->table_id, in_port,
4212 may_packet_in, honor_table_miss,
4213 ctx->xin->xcache);
4214 /* Swap back. */
4215 if (with_ct_orig) {
4216 tuple_swap(&ctx->xin->flow, ctx->wc);
4217 }
4218
4219 if (rule) {
4220 /* Fill in the cache entry here instead of xlate_recursively
4221 * to make the reference counting more explicit. We take a
4222 * reference in the lookups above if we are going to cache the
4223 * rule. */
4224 if (ctx->xin->xcache) {
4225 struct xc_entry *entry;
4226
4227 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
4228 entry->rule = rule;
4229 ofproto_rule_ref(&rule->up);
4230 }
4231
4232 struct ovs_list *old_trace = ctx->xin->trace;
4233 xlate_report_table(ctx, rule, table_id);
4234 xlate_recursively(ctx, rule, table_id <= old_table_id,
4235 is_last_action, xlator);
4236 ctx->xin->trace = old_trace;
4237 }
4238
4239 ctx->table_id = old_table_id;
4240 return;
4241 }
4242 }
4243
4244 /* Consumes the group reference, which is only taken if xcache exists. */
4245 static void
4246 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4247 struct ofputil_bucket *bucket)
4248 {
4249 if (ctx->xin->resubmit_stats) {
4250 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4251 }
4252 if (ctx->xin->xcache) {
4253 struct xc_entry *entry;
4254
4255 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
4256 entry->group.group = group;
4257 entry->group.bucket = bucket;
4258 }
4259 }
4260
4261 static void
4262 xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
4263 bool is_last_action)
4264 {
4265 struct ovs_list *old_trace = ctx->xin->trace;
4266 if (OVS_UNLIKELY(ctx->xin->trace)) {
4267 char *s = xasprintf("bucket %"PRIu32, bucket->bucket_id);
4268 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_BUCKET,
4269 s)->subs;
4270 free(s);
4271 }
4272
4273 uint64_t action_list_stub[1024 / 8];
4274 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4275 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4276 bucket->ofpacts_len);
4277 struct flow old_flow = ctx->xin->flow;
4278 bool old_was_mpls = ctx->was_mpls;
4279
4280 ofpacts_execute_action_set(&action_list, &action_set);
4281 ctx->depth++;
4282 do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action);
4283 ctx->depth--;
4284
4285 ofpbuf_uninit(&action_list);
4286
4287 /* Check if need to freeze. */
4288 if (ctx->freezing) {
4289 finish_freezing(ctx);
4290 }
4291
4292 /* Roll back flow to previous state.
4293 * This is equivalent to cloning the packet for each bucket.
4294 *
4295 * As a side effect any subsequently applied actions will
4296 * also effectively be applied to a clone of the packet taken
4297 * just before applying the all or indirect group.
4298 *
4299 * Note that group buckets are action sets, hence they cannot modify the
4300 * main action set. Also any stack actions are ignored when executing an
4301 * action set, so group buckets cannot change the stack either.
4302 * However, we do allow resubmit actions in group buckets, which could
4303  * break the above assumptions.  It is up to the controller not to mess
4304  * with the action_set and stack in the tables resubmitted to from
4305 * group buckets. */
4306 ctx->xin->flow = old_flow;
4307
4308 /* The group bucket popping MPLS should have no effect after bucket
4309 * execution. */
4310 ctx->was_mpls = old_was_mpls;
4311
4312 /* The fact that the group bucket exits (for any reason) does not mean that
4313 * the translation after the group action should exit. Specifically, if
4314 * the group bucket freezes translation, the actions after the group action
4315 * must continue processing with the original, not the frozen packet! */
4316 ctx->exit = false;
4317
4318 /* A context error in a bucket should not impact processing of other buckets
4319  * or actions.  This is similar to cloning a packet for each group bucket.
4320  * There is no need to restore the error to its old value, because a group
4321  * action is only processed when there is no previous context error.
4322  *
4323  * The exception is errors that enforce system limits, protecting the
4324  * translation from running too long or occupying too much space.  Such
4325  * errors should not be masked: XLATE_RECURSION_TOO_DEEP,
4326  * XLATE_TOO_MANY_RESUBMITS, and XLATE_STACK_TOO_DEEP fall in this
4327  * category. */
4328 if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
4329 ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
4330 /* reset the error and continue processing other buckets */
4331 ctx->error = XLATE_OK;
4332 }
4333
4334 ctx->xin->trace = old_trace;
4335 }
4336
4337 static struct ofputil_bucket *
4338 pick_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
4339 {
4340 return group_first_live_bucket(ctx, group, 0);
4341 }
4342
4343 static struct ofputil_bucket *
4344 pick_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4345 {
4346 flow_mask_hash_fields(&ctx->xin->flow, ctx->wc,
4347 NX_HASH_FIELDS_SYMMETRIC_L4);
4348 return group_best_live_bucket(ctx, group,
4349 flow_hash_symmetric_l4(&ctx->xin->flow, 0));
4350 }
4351
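/* Implements the 'hash' selection method: hashes the fields named in the
 * group's selection-method properties, applying the configured per-field
 * masks, and returns the best live bucket for the resulting hash, or NULL if
 * no bucket is live. */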
4352 static struct ofputil_bucket *
4353 pick_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4354 {
4355 const struct field_array *fields = &group->up.props.fields;
4356 const uint8_t *mask_values = fields->values;
4357 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
4358
4359 size_t i;
4360 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4361 const struct mf_field *mf = mf_from_id(i);
4362
4363 /* Skip fields for which prerequisites are not met. */
4364 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4365 /* Skip the mask bytes for this field. */
4366 mask_values += mf->n_bytes;
4367 continue;
4368 }
4369
4370 union mf_value value;
4371 union mf_value mask;
4372
4373 mf_get_value(mf, &ctx->xin->flow, &value);
4374 /* Mask the value. */
4375 for (int j = 0; j < mf->n_bytes; j++) {
4376 mask.b[j] = *mask_values++;
4377 value.b[j] &= mask.b[j];
4378 }
4379 basis = hash_bytes(&value, mf->n_bytes, basis);
4380
4381 /* For tunnels, hash in whether the field is present. */
4382 if (mf_is_tun_metadata(mf)) {
4383 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
4384 }
4385
4386 mf_mask_field_masked(mf, &mask, ctx->wc);
4387 }
4388
4389 return group_best_live_bucket(ctx, group, basis);
4390 }
4391
4392 static struct ofputil_bucket *
4393 pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4394 {
4395 uint32_t dp_hash = ctx->xin->flow.dp_hash;
4396
4397 /* dp_hash value 0 is special since it means that the dp_hash has not been
4398  * computed, as all computed dp_hash values are non-zero.  Therefore a
4399  * comparison with zero can be used to decide if the dp_hash value is valid
4400 * without masking the dp_hash field. */
4401 if (!dp_hash) {
4402 enum ovs_hash_alg hash_alg = group->hash_alg;
4403 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4404 /* Algorithm supported by all datapaths. */
4405 hash_alg = OVS_HASH_ALG_L4;
4406 }
4407 ctx_trigger_recirculate_with_hash(ctx, hash_alg, group->hash_basis);
4408 return NULL;
4409 } else {
4410 uint32_t hash_mask = group->hash_mask;
4411 ctx->wc->masks.dp_hash |= hash_mask;
4412
4413  * Starting from the original masked dp_hash value, iterate over the
4414 * hash mapping table to find the first live bucket. As the buckets
4415 * are quasi-randomly spread over the hash values, this maintains
4416 * a distribution according to bucket weights even when some buckets
4417 * are non-live. */
4418 for (int i = 0; i <= hash_mask; i++) {
4419 struct ofputil_bucket *b =
4420 group->hash_map[(dp_hash + i) & hash_mask];
4421 if (bucket_is_alive(ctx, b, 0)) {
4422 return b;
4423 }
4424 }
4425
4426 return NULL;
4427 }
4428 }
4429
4430 static struct ofputil_bucket *
4431 pick_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4432 {
4433 /* Select groups may access flow keys beyond L2 in order to
4434 * select a bucket. Recirculate as appropriate to make this possible.
4435 */
4436 if (ctx->was_mpls) {
4437 ctx_trigger_freeze(ctx);
4438 }
4439
4440 switch (group->selection_method) {
4441 case SEL_METHOD_DEFAULT:
4442 return pick_default_select_group(ctx, group);
4443 break;
4444 case SEL_METHOD_HASH:
4445 return pick_hash_fields_select_group(ctx, group);
4446 break;
4447 case SEL_METHOD_DP_HASH:
4448 return pick_dp_hash_select_group(ctx, group);
4449 break;
4450 default:
4451 /* Parsing of groups ensures this never happens */
4452 OVS_NOT_REACHED();
4453 }
4454
4455 return NULL;
4456 }
4457
4458 static void
4459 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4460 bool is_last_action)
4461 {
4462 bool was_in_group = ctx->in_group;
4463 ctx->in_group = true;
4464
4465 if (group->up.type == OFPGT11_ALL || group->up.type == OFPGT11_INDIRECT) {
4466 struct ovs_list *last_bucket = ovs_list_back(&group->up.buckets);
4467 struct ofputil_bucket *bucket;
4468 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
4469 bool is_last_bucket = &bucket->list_node == last_bucket;
4470 xlate_group_bucket(ctx, bucket, is_last_action && is_last_bucket);
4471 }
4472 xlate_group_stats(ctx, group, NULL);
4473 } else {
4474 struct ofputil_bucket *bucket;
4475 if (group->up.type == OFPGT11_SELECT) {
4476 bucket = pick_select_group(ctx, group);
4477 } else if (group->up.type == OFPGT11_FF) {
4478 bucket = pick_ff_group(ctx, group);
4479 } else {
4480 OVS_NOT_REACHED();
4481 }
4482
4483 if (bucket) {
4484 xlate_report(ctx, OFT_DETAIL, "using bucket %"PRIu32,
4485 bucket->bucket_id);
4486 xlate_group_bucket(ctx, bucket, is_last_action);
4487 xlate_group_stats(ctx, group, bucket);
4488 } else {
4489 xlate_report(ctx, OFT_DETAIL, "no live bucket");
4490 if (ctx->xin->xcache) {
4491 ofproto_group_unref(&group->up);
4492 }
4493 }
4494 }
4495
4496 ctx->in_group = was_in_group;
4497 }
4498
4499 static bool
4500 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4501 bool is_last_action)
4502 {
4503 if (xlate_resubmit_resource_check(ctx)) {
4504 struct group_dpif *group;
4505
4506 /* Take ref only if xcache exists. */
4507 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
4508 ctx->xin->tables_version, ctx->xin->xcache);
4509 if (!group) {
4510 /* XXX: Should set ctx->error ? */
4511 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4512 group_id);
4513 return true;
4514 }
4515 xlate_group_action__(ctx, group, is_last_action);
4516 }
4517
4518 return false;
4519 }
4520
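/* Translates a resubmit action: resolves OFPP_IN_PORT to the current input
 * port and table id 255 to the current table, then re-enters the flow table
 * lookup.  Packet-ins and table-miss handling are honored only when
 * resubmitting from an internal rule. */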
4521 static void
4522 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
4523 const struct ofpact_resubmit *resubmit,
4524 bool is_last_action)
4525 {
4526 ofp_port_t in_port;
4527 uint8_t table_id;
4528 bool may_packet_in = false;
4529 bool honor_table_miss = false;
4530
4531 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4532 /* Still allow missed packets to be sent to the controller
4533 * if resubmitting from an internal table. */
4534 may_packet_in = true;
4535 honor_table_miss = true;
4536 }
4537
4538 in_port = resubmit->in_port;
4539 if (in_port == OFPP_IN_PORT) {
4540 in_port = ctx->xin->flow.in_port.ofp_port;
4541 }
4542
4543 table_id = resubmit->table_id;
4544 if (table_id == 255) {
4545 table_id = ctx->table_id;
4546 }
4547
4548 xlate_table_action(ctx, in_port, table_id, may_packet_in,
4549 honor_table_miss, resubmit->with_ct_orig,
4550 is_last_action, do_xlate_actions);
4551 }
4552
4553 static void
4554 flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4555 bool all, bool is_last_action)
4556 {
4557 if (!xport) {
4558 return;
4559 }
4560
4561 if (all) {
4562 compose_output_action__(ctx, xport->ofp_port, NULL, false,
4563 is_last_action, false);
4564 } else {
4565 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4566 false);
4567 }
4568 }
4569
4570 static void
4571 flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4572 {
4573 const struct xport *xport, *last = NULL;
4574
4575 /* Use 'last' to keep track of the last output port. */
4576 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4577 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
4578 continue;
4579 }
4580
4581 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4582 /* The previously remembered port is not the last one; send a
4583  * packet out to it and update 'last'. */
4584 flood_packet_to_port(ctx, last, all, false);
4585 last = xport;
4586 }
4587 }
4588
4589 /* Send the packet to the 'last' port. */
4590 flood_packet_to_port(ctx, last, all, is_last_action);
4591 ctx->nf_output_iface = NF_OUT_FLOOD;
4592 }
4593
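/* Appends a userspace datapath action whose cookie tells the slow path to
 * send the packet to the OpenFlow controller with the given reason, maximum
 * length 'len', and 'controller_id'.  'dont_send' suppresses the actual
 * packet-in while still generating the action, and 'continuation' marks the
 * packet-in as a continuation. */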
4594 static void
4595 put_controller_user_action(struct xlate_ctx *ctx,
4596 bool dont_send, bool continuation,
4597 uint32_t recirc_id, int len,
4598 enum ofp_packet_in_reason reason,
4599 uint16_t controller_id)
4600 {
4601 struct user_action_cookie cookie;
4602
4603 memset(&cookie, 0, sizeof cookie);
4604 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4605 cookie.ofp_in_port = OFPP_NONE;
4606 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4607 cookie.controller.dont_send = dont_send;
4608 cookie.controller.continuation = continuation;
4609 cookie.controller.reason = reason;
4610 cookie.controller.recirc_id = recirc_id;
4611 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4612 cookie.controller.controller_id = controller_id;
4613 cookie.controller.max_len = len;
4614
4615 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4616 ctx->xin->flow.in_port.ofp_port);
4617 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
4618 flow_hash_5tuple(&ctx->xin->flow, 0));
4619 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4620 false, ctx->odp_actions);
4621 }
4622
4623 static void
4624 xlate_controller_action(struct xlate_ctx *ctx, int len,
4625 enum ofp_packet_in_reason reason,
4626 uint16_t controller_id,
4627 const uint8_t *userdata, size_t userdata_len)
4628 {
4629 xlate_commit_actions(ctx);
4630
4631 /* A packet sent by an action in a table-miss rule is considered an
4632 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4633 * it will get translated back to OFPR_ACTION for those versions. */
4634 if (reason == OFPR_ACTION
4635 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
4636 reason = OFPR_EXPLICIT_MISS;
4637 }
4638
4639 struct frozen_state state = {
4640 .table_id = ctx->table_id,
4641 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4642 .stack = ctx->stack.data,
4643 .stack_size = ctx->stack.size,
4644 .mirrors = ctx->mirrors,
4645 .conntracked = ctx->conntracked,
4646 .ofpacts = NULL,
4647 .ofpacts_len = 0,
4648 .action_set = NULL,
4649 .action_set_len = 0,
4650 .userdata = CONST_CAST(uint8_t *, userdata),
4651 .userdata_len = userdata_len,
4652 };
4653 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4654
4655 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4656 if (!recirc_id) {
4657 xlate_report_error(ctx, "Failed to allocate recirculation id");
4658 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4659 return;
4660 }
4661 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4662
4663 size_t offset;
4664 size_t ac_offset;
4665 uint32_t meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4666 if (meter_id != UINT32_MAX) {
4667 /* If a controller meter is configured, wrap the meter and userspace
4668  * actions in a sample action with 100% probability, which acts as a clone. */
4669 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4670 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4671 UINT32_MAX);
4672 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4673 OVS_SAMPLE_ATTR_ACTIONS);
4674 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
4675 }
4676
4677 /* Generate the datapath flows even if we don't send the packet-in
4678 * so that debugging more closely represents normal state. */
4679 bool dont_send = false;
4680 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4681 dont_send = true;
4682 }
4683 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4684 reason, controller_id);
4685
4686 if (meter_id != UINT32_MAX) {
4687 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4688 nl_msg_end_nested(ctx->odp_actions, offset);
4689 }
4690 }
4691
4692 /* Creates a frozen state, and allocates a unique recirc id for the given
4693 * state. Returns a non-zero recirc id if it is allocated successfully.
4694 * Returns 0 otherwise.
4695  */
4696 static uint32_t
4697 finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4698 {
4699 ovs_assert(ctx->freezing);
4700
4701 struct frozen_state state = {
4702 .table_id = table,
4703 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4704 .stack = ctx->stack.data,
4705 .stack_size = ctx->stack.size,
4706 .mirrors = ctx->mirrors,
4707 .conntracked = ctx->conntracked,
4708 .xport_uuid = ctx->xin->xport_uuid,
4709 .ofpacts = ctx->frozen_actions.data,
4710 .ofpacts_len = ctx->frozen_actions.size,
4711 .action_set = ctx->action_set.data,
4712 .action_set_len = ctx->action_set.size,
4713 .userdata = ctx->pause ? CONST_CAST(uint8_t *, ctx->pause->userdata)
4714 : NULL,
4715 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
4716 };
4717 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4718
4719 /* Allocate a unique recirc id for the given metadata state in the
4720 * flow. An existing id, with a new reference to the corresponding
4721 * recirculation context, will be returned if possible.
4722 * The life-cycle of this recirc id is managed by associating it
4723 * with the udpif key ('ukey') created for each new datapath flow. */
4724 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4725 if (!recirc_id) {
4726 xlate_report_error(ctx, "Failed to allocate recirculation id");
4727 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4728 return 0;
4729 }
4730 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4731
4732 if (ctx->pause) {
4733 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4734 return 0;
4735 }
4736
4737 put_controller_user_action(ctx, false, true, recirc_id,
4738 ctx->pause->max_len,
4739 ctx->pause->reason,
4740 ctx->pause->controller_id);
4741 } else {
4742 if (ctx->recirc_update_dp_hash) {
4743 struct ovs_action_hash *act_hash;
4744
4745 /* Hash action. */
4746 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4747 OVS_ACTION_ATTR_HASH,
4748 sizeof *act_hash);
4749 act_hash->hash_alg = ctx->dp_hash_alg;
4750 act_hash->hash_basis = ctx->dp_hash_basis;
4751 }
4752 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
4753 }
4754
4755 /* Undo changes done by freezing. */
4756 ctx_cancel_freeze(ctx);
4757 return recirc_id;
4758 }
4759
4760 /* Called only when we're freezing. */
4761 static void
4762 finish_freezing(struct xlate_ctx *ctx)
4763 {
4764 xlate_commit_actions(ctx);
4765 finish_freezing__(ctx, 0);
4766 }
4767
4768 /* Fork the pipeline here. The current packet will continue processing the
4769 * current action list. A clone of the current packet will recirculate, skip
4770 * the remainder of the current action list and asynchronously resume pipeline
4771 * processing in 'table' with the current metadata and action set. */
4772 static void
4773 compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
4774 const uint16_t zone)
4775 {
4776 uint32_t recirc_id;
4777 ctx->freezing = true;
4778 recirc_id = finish_freezing__(ctx, table);
4779
4780 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
4781 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
4782 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
4783 ctx->xin->packet, recirc_id, zone)) {
4784 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
4785 "recirculate. The forked pipeline will be resumed at "
4786 "table %u.", table);
4787 } else {
4788 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
4789 "forked pipeline with recirc_id = %d.", recirc_id);
4790 }
4791 }
4792 }
4793
4794 static void
4795 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
4796 {
4797 struct flow *flow = &ctx->xin->flow;
4798 int n;
4799
4800 ovs_assert(eth_type_mpls(mpls->ethertype));
4801
4802 n = flow_count_mpls_labels(flow, ctx->wc);
4803 if (!n) {
4804 xlate_commit_actions(ctx);
4805 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4806 if (ctx->xin->packet != NULL) {
4807 xlate_report_error(ctx, "dropping packet on which an MPLS push "
4808 "action can't be performed as it would have "
4809 "more MPLS LSEs than the %d supported.",
4810 FLOW_MAX_MPLS_LABELS);
4811 }
4812 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4813 return;
4814 }
4815
4816 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
4817 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
4818 }
4819
4820 static void
4821 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
4822 {
4823 struct flow *flow = &ctx->xin->flow;
4824 int n = flow_count_mpls_labels(flow, ctx->wc);
4825
4826 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
4827 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
4828 ctx->was_mpls = true;
4829 }
4830 } else if (n >= FLOW_MAX_MPLS_LABELS) {
4831 if (ctx->xin->packet != NULL) {
4832 xlate_report_error(ctx, "dropping packet on which an "
4833 "MPLS pop action can't be performed as it has "
4834 "more MPLS LSEs than the %d supported.",
4835 FLOW_MAX_MPLS_LABELS);
4836 }
4837 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
4838 ofpbuf_clear(ctx->odp_actions);
4839 }
4840 }
4841
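/* Decrements the IP TTL of the flow and returns false so that processing
 * continues.  If the flow is not IP, does nothing.  If the TTL would expire,
 * instead sends the packet to each controller listed in 'ids' with reason
 * OFPR_INVALID_TTL and returns true so that the caller stops processing the
 * current table. */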
4842 static bool
4843 compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
4844 {
4845 struct flow *flow = &ctx->xin->flow;
4846
4847 if (!is_ip_any(flow)) {
4848 return false;
4849 }
4850
4851 ctx->wc->masks.nw_ttl = 0xff;
4852 if (flow->nw_ttl > 1) {
4853 flow->nw_ttl--;
4854 return false;
4855 } else {
4856 size_t i;
4857
4858 for (i = 0; i < ids->n_controllers; i++) {
4859 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4860 ids->cnt_ids[i], NULL, 0);
4861 }
4862
4863 /* Stop processing for current table. */
4864 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
4865 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
4866 return true;
4867 }
4868 }
4869
4870 static void
4871 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
4872 {
4873 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4874 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
4875 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
4876 }
4877 }
4878
4879 static void
4880 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
4881 {
4882 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4883 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
4884 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
4885 }
4886 }
4887
4888 static bool
4889 compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
4890 {
4891 struct flow *flow = &ctx->xin->flow;
4892
4893 if ((flow->packet_type == htonl(PT_NSH)) ||
4894 (flow->dl_type == htons(ETH_TYPE_NSH))) {
4895 ctx->wc->masks.nsh.ttl = 0xff;
4896 if (flow->nsh.ttl > 1) {
4897 flow->nsh.ttl--;
4898 return false;
4899 } else {
4900 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
4901 0, NULL, 0);
4902 }
4903 }
4904
4905 /* Stop processing for current table. */
4906 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
4907 return true;
4908 }
4909
4910 static void
4911 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
4912 {
4913 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
4914 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
4915 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
4916 }
4917 }
4918
4919 static bool
4920 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
4921 {
4922 struct flow *flow = &ctx->xin->flow;
4923
4924 if (eth_type_mpls(flow->dl_type)) {
4925 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
4926
4927 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
4928 if (ttl > 1) {
4929 ttl--;
4930 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
4931 return false;
4932 } else {
4933 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
4934 NULL, 0);
4935 }
4936 }
4937
4938 /* Stop processing for current table. */
4939 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
4940 return true;
4941 }
4942
4943 /* Emits an action that outputs to 'port', within 'ctx'.
4944 *
4945 * 'controller_len' affects only packets sent to an OpenFlow controller. It
4946 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
4947 * send the whole packet (and 0 means to omit the packet entirely).
4948 *
4949 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
4950 * controller. If it is false, then the packet is never sent to the OpenFlow
4951 * controller.
4952 *
4953 * 'is_last_action' should be true if this output is the last OpenFlow action
4954 * to be processed, which enables certain optimizations.
4955 *
4956 * 'truncate' should be true if the packet to be output is being truncated,
4957 * which suppresses certain optimizations. */
4958 static void
4959 xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
4960 uint16_t controller_len, bool may_packet_in,
4961 bool is_last_action, bool truncate)
4962 {
4963 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
4964
4965 ctx->nf_output_iface = NF_OUT_DROP;
4966
4967 switch (port) {
4968 case OFPP_IN_PORT:
4969 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
4970 is_last_action, truncate);
4971 break;
4972 case OFPP_TABLE:
4973 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
4974 0, may_packet_in, true, false, false,
4975 do_xlate_actions);
4976 break;
4977 case OFPP_NORMAL:
4978 xlate_normal(ctx);
4979 break;
4980 case OFPP_FLOOD:
4981 flood_packets(ctx, false, is_last_action);
4982 break;
4983 case OFPP_ALL:
4984 flood_packets(ctx, true, is_last_action);
4985 break;
4986 case OFPP_CONTROLLER:
4987 xlate_controller_action(ctx, controller_len,
4988 (ctx->in_packet_out ? OFPR_PACKET_OUT
4989 : ctx->in_group ? OFPR_GROUP
4990 : ctx->in_action_set ? OFPR_ACTION_SET
4991 : OFPR_ACTION),
4992 0, NULL, 0);
4993 break;
4994 case OFPP_NONE:
4995 break;
4996 case OFPP_LOCAL:
4997 default:
4998 if (port != ctx->xin->flow.in_port.ofp_port) {
4999 compose_output_action(ctx, port, NULL, is_last_action, truncate);
5000 } else {
5001 xlate_report(ctx, OFT_WARN, "skipping output to input port");
5002 }
5003 break;
5004 }
5005
5006 if (prev_nf_output_iface == NF_OUT_FLOOD) {
5007 ctx->nf_output_iface = NF_OUT_FLOOD;
5008 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
5009 ctx->nf_output_iface = prev_nf_output_iface;
5010 } else if (prev_nf_output_iface != NF_OUT_DROP &&
5011 ctx->nf_output_iface != NF_OUT_FLOOD) {
5012 ctx->nf_output_iface = NF_OUT_MULTI;
5013 }
5014 }
5015
5016 static void
5017 xlate_output_reg_action(struct xlate_ctx *ctx,
5018 const struct ofpact_output_reg *or,
5019 bool is_last_action)
5020 {
5021 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
5022 if (port <= UINT16_MAX) {
5023 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
5024
5025 union mf_subvalue value;
5026
5027 memset(&value, 0xff, sizeof value);
5028 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
5029 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
5030 false, is_last_action, false);
5031 } else {
5032 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
5033 port);
5034 }
5035 }
5036
5037 static void
5038 xlate_output_trunc_action(struct xlate_ctx *ctx,
5039 ofp_port_t port, uint32_t max_len,
5040 bool is_last_action)
5041 {
5042 bool support_trunc = ctx->xbridge->support.trunc;
5043 struct ovs_action_trunc *trunc;
5044 char name[OFP10_MAX_PORT_NAME_LEN];
5045
5046 switch (port) {
5047 case OFPP_TABLE:
5048 case OFPP_NORMAL:
5049 case OFPP_FLOOD:
5050 case OFPP_ALL:
5051 case OFPP_CONTROLLER:
5052 case OFPP_NONE:
5053 ofputil_port_to_string(port, NULL, name, sizeof name);
5054 xlate_report(ctx, OFT_WARN,
5055 "output_trunc does not support port: %s", name);
5056 break;
5057 case OFPP_LOCAL:
5058 case OFPP_IN_PORT:
5059 default:
5060 if (port != ctx->xin->flow.in_port.ofp_port) {
5061 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
5062
5063 if (xport == NULL || xport->odp_port == ODPP_NONE) {
5064 /* Since truncation happens at the output action that follows it,
5065  * the behavior is somewhat unpredictable if the output port is a
5066  * patch port.  For simplicity, disallow this case. */
5067 ofputil_port_to_string(port, NULL, name, sizeof name);
5068 xlate_report_error(ctx, "output_trunc does not support "
5069 "patch port %s", name);
5070 break;
5071 }
5072
5073 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
5074 OVS_ACTION_ATTR_TRUNC,
5075 sizeof *trunc);
5076 trunc->max_len = max_len;
5077 xlate_output_action(ctx, port, 0, false, is_last_action, true);
5078 if (!support_trunc) {
5079 ctx->xout->slow |= SLOW_ACTION;
5080 }
5081 } else {
5082 xlate_report(ctx, OFT_WARN, "skipping output to input port");
5083 }
5084 break;
5085 }
5086 }
5087
5088 static void
5089 xlate_enqueue_action(struct xlate_ctx *ctx,
5090 const struct ofpact_enqueue *enqueue,
5091 bool is_last_action)
5092 {
5093 ofp_port_t ofp_port = enqueue->port;
5094 uint32_t queue_id = enqueue->queue;
5095 uint32_t flow_priority, priority;
5096 int error;
5097
5098 /* Translate queue to priority. */
5099 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
5100 if (error) {
5101 /* Fall back to ordinary output action. */
5102 xlate_output_action(ctx, enqueue->port, 0, false,
5103 is_last_action, false);
5104 return;
5105 }
5106
5107 /* Check output port. */
5108 if (ofp_port == OFPP_IN_PORT) {
5109 ofp_port = ctx->xin->flow.in_port.ofp_port;
5110 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
5111 return;
5112 }
5113
5114 /* Add datapath actions. */
5115 flow_priority = ctx->xin->flow.skb_priority;
5116 ctx->xin->flow.skb_priority = priority;
5117 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
5118 ctx->xin->flow.skb_priority = flow_priority;
5119
5120 /* Update NetFlow output port. */
5121 if (ctx->nf_output_iface == NF_OUT_DROP) {
5122 ctx->nf_output_iface = ofp_port;
5123 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
5124 ctx->nf_output_iface = NF_OUT_MULTI;
5125 }
5126 }
5127
5128 static void
5129 xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
5130 {
5131 uint32_t skb_priority;
5132
5133 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
5134 ctx->xin->flow.skb_priority = skb_priority;
5135 } else {
5136 /* Couldn't translate queue to a priority. Nothing to do. A warning
5137 * has already been logged. */
5138 }
5139 }
5140
5141 static bool
5142 slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
5143 {
5144 const struct xbridge *xbridge = xbridge_;
5145 struct xport *port;
5146
5147 switch (ofp_port) {
5148 case OFPP_IN_PORT:
5149 case OFPP_TABLE:
5150 case OFPP_NORMAL:
5151 case OFPP_FLOOD:
5152 case OFPP_ALL:
5153 case OFPP_NONE:
5154 return true;
5155 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5156 return false;
5157 default:
5158 port = get_ofp_port(xbridge, ofp_port);
5159 return port ? port->may_enable : false;
5160 }
5161 }
5162
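/* Executes the OpenFlow "bundle" action: selects an output port among the
 * bundle's enabled members (per slave_enabled_cb above).  For bundle_load the
 * selected port is written into the destination field; otherwise the packet
 * is output to it directly. */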
5163 static void
5164 xlate_bundle_action(struct xlate_ctx *ctx,
5165 const struct ofpact_bundle *bundle,
5166 bool is_last_action)
5167 {
5168 ofp_port_t port;
5169
5170 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
5171 CONST_CAST(struct xbridge *, ctx->xbridge));
5172 if (bundle->dst.field) {
5173 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
5174 xlate_report_subfield(ctx, &bundle->dst);
5175 } else {
5176 xlate_output_action(ctx, port, 0, false, is_last_action, false);
5177 }
5178 }
5179
5180 static void
5181 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5182 {
5183 learn_mask(learn, ctx->wc);
5184
5185 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
5186 uint64_t ofpacts_stub[1024 / 8];
5187 struct ofputil_flow_mod fm;
5188 struct ofproto_flow_mod ofm__, *ofm;
5189 struct ofpbuf ofpacts;
5190 enum ofperr error;
5191
5192 if (ctx->xin->xcache) {
5193 ofm = xmalloc(sizeof *ofm);
5194 } else {
5195 ofm = &ofm__;
5196 }
5197
5198 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
5199 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
5200 if (OVS_UNLIKELY(ctx->xin->trace)) {
5201 struct ds s = DS_EMPTY_INITIALIZER;
5202 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
5203 minimatch_format(&fm.match,
5204 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
5205 NULL, &s, OFP_DEFAULT_PRIORITY);
5206 ds_chomp(&s, ' ');
5207 ds_put_format(&s, " priority=%d", fm.priority);
5208 if (fm.new_cookie) {
5209 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5210 }
5211 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5212 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5213 }
5214 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5215 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5216 }
5217 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5218 ds_put_cstr(&s, " send_flow_rem");
5219 }
5220 ds_put_cstr(&s, " actions=");
5221 struct ofpact_format_params fp = { .s = &s };
5222 ofpacts_format(fm.ofpacts, fm.ofpacts_len, &fp);
5223 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5224 ds_destroy(&s);
5225 }
5226 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5227 &fm, ofm);
5228 ofpbuf_uninit(&ofpacts);
5229
5230 if (!error) {
5231 bool success = true;
5232 if (ctx->xin->allow_side_effects) {
5233 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5234 learn->limit, &success);
5235 } else if (learn->limit) {
5236 if (!ofm->temp_rule
5237 || ofm->temp_rule->state != RULE_INSERTED) {
5238 /* The learned rule expired and there are no packets, so
5239 * we cannot learn again. Since the translated actions
5240 * depend on the result of learning, we tell the caller
5241 * that there's no point in caching this result. */
5242 ctx->xout->avoid_caching = true;
5243 }
5244 }
5245
5246 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5247 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5248 &ctx->xin->flow, ctx->wc);
5249 xlate_report_subfield(ctx, &learn->result_dst);
5250 }
5251
5252 if (success && ctx->xin->xcache) {
5253 struct xc_entry *entry;
5254
5255 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5256 entry->learn.ofm = ofm;
5257 entry->learn.limit = learn->limit;
5258 ofm = NULL;
5259 } else {
5260 ofproto_flow_mod_uninit(ofm);
5261 }
5262
5263 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5264 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5265 }
5266 }
5267
5268 if (ofm != &ofm__) {
5269 free(ofm);
5270 }
5271
5272 if (error) {
5273 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5274 ofperr_to_string(error));
5275 }
5276
5277 minimatch_destroy(&fm.match);
5278 } else {
5279 xlate_report(ctx, OFT_WARN,
5280 "suppressing side effects, so learn action ignored");
5281 }
5282 }
5283
5284 static void
5285 xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5286 uint16_t idle_timeout, uint16_t hard_timeout)
5287 {
5288 if (tcp_flags & (TCP_FIN | TCP_RST)) {
5289 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
5290 }
5291 }
5292
5293 static void
5294 xlate_fin_timeout(struct xlate_ctx *ctx,
5295 const struct ofpact_fin_timeout *oft)
5296 {
5297 if (ctx->rule) {
5298 if (ctx->xin->allow_side_effects) {
5299 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5300 oft->fin_idle_timeout, oft->fin_hard_timeout);
5301 }
5302 if (ctx->xin->xcache) {
5303 struct xc_entry *entry;
5304
5305 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
5306 /* XC_RULE already holds a reference on the rule, none is taken
5307 * here. */
5308 entry->fin.rule = ctx->rule;
5309 entry->fin.idle = oft->fin_idle_timeout;
5310 entry->fin.hard = oft->fin_hard_timeout;
5311 }
5312 }
5313 }
5314
5315 static void
5316 xlate_sample_action(struct xlate_ctx *ctx,
5317 const struct ofpact_sample *os)
5318 {
5319 odp_port_t output_odp_port = ODPP_NONE;
5320 odp_port_t tunnel_out_port = ODPP_NONE;
5321 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5322 bool emit_set_tunnel = false;
5323
5324 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5325 return;
5326 }
5327
5328 /* Scale the probability from 16-bit to 32-bit while representing
5329 * the same percentage. */
5330 uint32_t probability = (os->probability << 16) | os->probability;
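/* E.g. os->probability == 0x8000 (about 50% of UINT16_MAX) becomes
 * 0x80008000, the same fraction of UINT32_MAX. */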
5331
5332 /* If the ofp_port in the flow sample action is equal to the input
5333 * port, this sample action is an input port action. */
5334 if (os->sampling_port != OFPP_NONE &&
5335 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5336 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5337 os->sampling_port);
5338 if (output_odp_port == ODPP_NONE) {
5339 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5340 "action", os->sampling_port);
5341 return;
5342 }
5343
5344 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5345 os->collector_set_id)
5346 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
5347 tunnel_out_port = output_odp_port;
5348 emit_set_tunnel = true;
5349 }
5350 }
5351
5352 xlate_commit_actions(ctx);
5353 /* If 'emit_set_tunnel' is set, sample(sampling_port=1) translates into
5354 * the datapath actions set(tunnel(...)), sample(...), which is used for
5355 * sampling egress tunnel information. */
5356 if (emit_set_tunnel) {
5357 const struct xport *xport = get_ofp_port(ctx->xbridge,
5358 os->sampling_port);
5359
5360 if (xport && xport->is_tunnel) {
5361 struct flow *flow = &ctx->xin->flow;
5362 tnl_port_send(xport->ofport, flow, ctx->wc);
5363 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5364 struct flow_tnl flow_tnl = flow->tunnel;
5365 const char *tnl_type;
5366
5367 tnl_type = tnl_port_get_type(xport->ofport);
5368 commit_odp_tunnel_action(flow, &ctx->base_flow,
5369 ctx->odp_actions, tnl_type);
5370 flow->tunnel = flow_tnl;
5371 }
5372 } else {
5373 xlate_report_error(ctx,
5374 "sampling_port:%d should be a tunnel port.",
5375 os->sampling_port);
5376 }
5377 }
5378
5379 struct user_action_cookie cookie = {
5380 .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
5381 .ofp_in_port = ctx->xin->flow.in_port.ofp_port,
5382 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
5383 .flow_sample = {
5384 .probability = os->probability,
5385 .collector_set_id = os->collector_set_id,
5386 .obs_domain_id = os->obs_domain_id,
5387 .obs_point_id = os->obs_point_id,
5388 .output_odp_port = output_odp_port,
5389 .direction = os->direction,
5390 }
5391 };
5392 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
5393 }
5394
5395 /* Determines whether a datapath action translated from an OpenFlow action
5396 * can be reversed by another datapath action.
5397 *
5398 * OpenFlow actions that do not emit datapath actions are trivially
5399 * reversible. Reversibility of other actions depends on the nature of the
5400 * action and its translation. */
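/* For example, ct(), meter(), and nat() below emit datapath actions whose
 * side effects (connection tracking state, policing) cannot be undone by a
 * later datapath action, so they are treated as non-reversible. */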
5401 static bool
5402 reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
5403 {
5404 const struct ofpact *a;
5405
5406 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5407 switch (a->type) {
5408 case OFPACT_BUNDLE:
5409 case OFPACT_CLEAR_ACTIONS:
5410 case OFPACT_CLONE:
5411 case OFPACT_CONJUNCTION:
5412 case OFPACT_CONTROLLER:
5413 case OFPACT_CT_CLEAR:
5414 case OFPACT_DEBUG_RECIRC:
5415 case OFPACT_DEBUG_SLOW:
5416 case OFPACT_DEC_MPLS_TTL:
5417 case OFPACT_DEC_TTL:
5418 case OFPACT_ENQUEUE:
5419 case OFPACT_EXIT:
5420 case OFPACT_FIN_TIMEOUT:
5421 case OFPACT_GOTO_TABLE:
5422 case OFPACT_GROUP:
5423 case OFPACT_LEARN:
5424 case OFPACT_MULTIPATH:
5425 case OFPACT_NOTE:
5426 case OFPACT_OUTPUT:
5427 case OFPACT_OUTPUT_REG:
5428 case OFPACT_POP_MPLS:
5429 case OFPACT_POP_QUEUE:
5430 case OFPACT_PUSH_MPLS:
5431 case OFPACT_PUSH_VLAN:
5432 case OFPACT_REG_MOVE:
5433 case OFPACT_RESUBMIT:
5434 case OFPACT_SAMPLE:
5435 case OFPACT_SET_ETH_DST:
5436 case OFPACT_SET_ETH_SRC:
5437 case OFPACT_SET_FIELD:
5438 case OFPACT_SET_IP_DSCP:
5439 case OFPACT_SET_IP_ECN:
5440 case OFPACT_SET_IP_TTL:
5441 case OFPACT_SET_IPV4_DST:
5442 case OFPACT_SET_IPV4_SRC:
5443 case OFPACT_SET_L4_DST_PORT:
5444 case OFPACT_SET_L4_SRC_PORT:
5445 case OFPACT_SET_MPLS_LABEL:
5446 case OFPACT_SET_MPLS_TC:
5447 case OFPACT_SET_MPLS_TTL:
5448 case OFPACT_SET_QUEUE:
5449 case OFPACT_SET_TUNNEL:
5450 case OFPACT_SET_VLAN_PCP:
5451 case OFPACT_SET_VLAN_VID:
5452 case OFPACT_STACK_POP:
5453 case OFPACT_STACK_PUSH:
5454 case OFPACT_STRIP_VLAN:
5455 case OFPACT_UNROLL_XLATE:
5456 case OFPACT_WRITE_ACTIONS:
5457 case OFPACT_WRITE_METADATA:
5458 break;
5459
5460 case OFPACT_CT:
5461 case OFPACT_METER:
5462 case OFPACT_NAT:
5463 case OFPACT_OUTPUT_TRUNC:
5464 case OFPACT_ENCAP:
5465 case OFPACT_DECAP:
5466 case OFPACT_DEC_NSH_TTL:
5467 return false;
5468 }
5469 }
5470 return true;
5471 }
5472
5473 static void
5474 clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
5475 struct xlate_ctx *ctx, bool is_last_action)
5476 {
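/* Give the clone private copies of the stack and the action set, seeded from
 * the caller's, so that changes made while translating the cloned actions do
 * not leak back into the enclosing translation (restored at xlate_done). */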
5477 struct ofpbuf old_stack = ctx->stack;
5478 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5479 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5480 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5481
5482 struct ofpbuf old_action_set = ctx->action_set;
5483 uint64_t actset_stub[1024 / 8];
5484 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5485 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5486
5487 size_t offset, ac_offset;
5488 struct flow old_flow = ctx->xin->flow;
5489
5490 if (reversible_actions(actions, actions_len) || is_last_action) {
5491 old_flow = ctx->xin->flow;
5492 do_xlate_actions(actions, actions_len, ctx, is_last_action);
5493 if (!ctx->freezing) {
5494 xlate_action_set(ctx);
5495 }
5496 if (ctx->freezing) {
5497 finish_freezing(ctx);
5498 }
5499 goto xlate_done;
5500 }
5501
5502 /* Commit datapath actions before emitting the clone action to avoid
5503 * emitting those actions twice: once inside the clone and another time
5504 * for the actions after the clone. */
5505 xlate_commit_actions(ctx);
5506 struct flow old_base = ctx->base_flow;
5507 bool old_was_mpls = ctx->was_mpls;
5508 bool old_conntracked = ctx->conntracked;
5509
5510 /* The actions are not reversible, so a datapath clone action is
5511 * required to encode the translation. Select the clone mechanism
5512 * based on datapath capabilities. */
5513 if (ctx->xbridge->support.clone) {
5514 /* Use the native clone action as the datapath clone. */
5515 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
5516 do_xlate_actions(actions, actions_len, ctx, true);
5517 if (!ctx->freezing) {
5518 xlate_action_set(ctx);
5519 }
5520 if (ctx->freezing) {
5521 finish_freezing(ctx);
5522 }
5523 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5524 goto dp_clone_done;
5525 }
5526
5527 if (ctx->xbridge->support.sample_nesting > 3) {
5528 /* Use sample action as datapath clone. */
5529 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5530 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5531 OVS_SAMPLE_ATTR_ACTIONS);
5532 do_xlate_actions(actions, actions_len, ctx, true);
5533 if (!ctx->freezing) {
5534 xlate_action_set(ctx);
5535 }
5536 if (ctx->freezing) {
5537 finish_freezing(ctx);
5538 }
5539 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5540 nl_msg_cancel_nested(ctx->odp_actions, offset);
5541 } else {
5542 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5543 UINT32_MAX); /* 100% probability. */
5544 nl_msg_end_nested(ctx->odp_actions, offset);
5545 }
5546 goto dp_clone_done;
5547 }
5548
5549 /* The datapath supports neither clone nor nested sample actions, so
5550 * skip translating the cloned actions and report an error. */
5551 xlate_report_error(ctx, "Failed to compose clone action");
5552
5553 dp_clone_done:
5554 /* The clone's conntrack execution should have no effect on the original
5555 * packet. */
5556 ctx->conntracked = old_conntracked;
5557
5558 /* Popping MPLS from the clone should have no effect on the original
5559 * packet. */
5560 ctx->was_mpls = old_was_mpls;
5561
5562 /* Restore the 'base_flow' for the next action. */
5563 ctx->base_flow = old_base;
5564
5565 xlate_done:
5566 ofpbuf_uninit(&ctx->action_set);
5567 ctx->action_set = old_action_set;
5568 ofpbuf_uninit(&ctx->stack);
5569 ctx->stack = old_stack;
5570 ctx->xin->flow = old_flow;
5571 }
5572
5573 static void
5574 compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5575 bool is_last_action)
5576 {
5577 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5578
5579 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action);
5580 }
5581
5582 static void
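/* Translates an OpenFlow meter action into a datapath meter action, but only
 * when the meter has been instantiated in the datapath (i.e. it has a valid
 * provider meter id); otherwise the datapath action is omitted. */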
5583 xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5584 {
5585 if (meter->provider_meter_id != UINT32_MAX) {
5586 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5587 meter->provider_meter_id);
5588 }
5589 }
5590
5591 static bool
5592 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
5593 {
5594 if (xport->config & (is_stp(&ctx->xin->flow)
5595 ? OFPUTIL_PC_NO_RECV_STP
5596 : OFPUTIL_PC_NO_RECV)) {
5597 return false;
5598 }
5599
5600 /* Only drop packets here if both forwarding and learning are
5601 * disabled. If just learning is enabled, we need to have
5602 * OFPP_NORMAL and the learning action have a look at the packet
5603 * before we can drop it. */
5604 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5605 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
5606 return false;
5607 }
5608
5609 return true;
5610 }
5611
5612 static void
5613 xlate_write_actions__(struct xlate_ctx *ctx,
5614 const struct ofpact *ofpacts, size_t ofpacts_len)
5615 {
5616 /* Maintain actset_output depending on the contents of the action set:
5617 *
5618 * - OFPP_UNSET, if there is no "output" action.
5619 *
5620 * - The output port, if there is an "output" action and no "group"
5621 * action.
5622 *
5623 * - OFPP_UNSET, if there is a "group" action.
5624 */
5625 if (!ctx->action_set_has_group) {
5626 const struct ofpact *a;
5627 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5628 if (a->type == OFPACT_OUTPUT) {
5629 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5630 } else if (a->type == OFPACT_GROUP) {
5631 ctx->xin->flow.actset_output = OFPP_UNSET;
5632 ctx->action_set_has_group = true;
5633 break;
5634 }
5635 }
5636 }
5637
5638 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5639 }
5640
5641 static void
5642 xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5643 {
5644 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
5645 }
5646
5647 static void
5648 xlate_action_set(struct xlate_ctx *ctx)
5649 {
5650 uint64_t action_list_stub[1024 / 8];
5651 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
5652 ofpacts_execute_action_set(&action_list, &ctx->action_set);
5653 /* Clear the action set, as it is not needed any more. */
5654 ofpbuf_clear(&ctx->action_set);
5655 if (action_list.size) {
5656 ctx->in_action_set = true;
5657
5658 struct ovs_list *old_trace = ctx->xin->trace;
5659 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5660 "--. Executing action set:");
5661 do_xlate_actions(action_list.data, action_list.size, ctx, true);
5662 ctx->xin->trace = old_trace;
5663
5664 ctx->in_action_set = false;
5665 }
5666 ofpbuf_uninit(&action_list);
5667 }
5668
5669 static void
5670 freeze_put_unroll_xlate(struct xlate_ctx *ctx)
5671 {
5672 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
5673
5674 /* Restore the table_id and rule cookie for a potential packet-in,
5675 * if needed. */
5676 if (!unroll ||
5677 (ctx->table_id != unroll->rule_table_id
5678 || ctx->rule_cookie != unroll->rule_cookie)) {
5679 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
5680 unroll->rule_table_id = ctx->table_id;
5681 unroll->rule_cookie = ctx->rule_cookie;
5682 ctx->frozen_actions.header = unroll;
5683 }
5684 }
5685
5686
5687 /* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5688 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5689 * present, before any action that may depend on the current table ID or flow
5690 * cookie. */
5691 static void
5692 freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
5693 struct xlate_ctx *ctx)
5694 {
5695 for (; a < end; a = ofpact_next(a)) {
5696 switch (a->type) {
5697 case OFPACT_OUTPUT_REG:
5698 case OFPACT_OUTPUT_TRUNC:
5699 case OFPACT_GROUP:
5700 case OFPACT_OUTPUT:
5701 case OFPACT_CONTROLLER:
5702 case OFPACT_DEC_MPLS_TTL:
5703 case OFPACT_DEC_NSH_TTL:
5704 case OFPACT_DEC_TTL:
5705 /* These actions may generate asynchronous messages, which include
5706 * table ID and flow cookie information. */
5707 freeze_put_unroll_xlate(ctx);
5708 break;
5709
5710 case OFPACT_RESUBMIT:
5711 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5712 /* This resubmit action is relative to the current table, so we
5713 * need to track what table that is. */
5714 freeze_put_unroll_xlate(ctx);
5715 }
5716 break;
5717
5718 case OFPACT_SET_TUNNEL:
5719 case OFPACT_REG_MOVE:
5720 case OFPACT_SET_FIELD:
5721 case OFPACT_STACK_PUSH:
5722 case OFPACT_STACK_POP:
5723 case OFPACT_LEARN:
5724 case OFPACT_WRITE_METADATA:
5725 case OFPACT_GOTO_TABLE:
5726 case OFPACT_ENQUEUE:
5727 case OFPACT_SET_VLAN_VID:
5728 case OFPACT_SET_VLAN_PCP:
5729 case OFPACT_STRIP_VLAN:
5730 case OFPACT_PUSH_VLAN:
5731 case OFPACT_SET_ETH_SRC:
5732 case OFPACT_SET_ETH_DST:
5733 case OFPACT_SET_IPV4_SRC:
5734 case OFPACT_SET_IPV4_DST:
5735 case OFPACT_SET_IP_DSCP:
5736 case OFPACT_SET_IP_ECN:
5737 case OFPACT_SET_IP_TTL:
5738 case OFPACT_SET_L4_SRC_PORT:
5739 case OFPACT_SET_L4_DST_PORT:
5740 case OFPACT_SET_QUEUE:
5741 case OFPACT_POP_QUEUE:
5742 case OFPACT_PUSH_MPLS:
5743 case OFPACT_POP_MPLS:
5744 case OFPACT_SET_MPLS_LABEL:
5745 case OFPACT_SET_MPLS_TC:
5746 case OFPACT_SET_MPLS_TTL:
5747 case OFPACT_MULTIPATH:
5748 case OFPACT_BUNDLE:
5749 case OFPACT_EXIT:
5750 case OFPACT_UNROLL_XLATE:
5751 case OFPACT_FIN_TIMEOUT:
5752 case OFPACT_CLEAR_ACTIONS:
5753 case OFPACT_WRITE_ACTIONS:
5754 case OFPACT_METER:
5755 case OFPACT_SAMPLE:
5756 case OFPACT_CLONE:
5757 case OFPACT_ENCAP:
5758 case OFPACT_DECAP:
5759 case OFPACT_DEBUG_RECIRC:
5760 case OFPACT_DEBUG_SLOW:
5761 case OFPACT_CT:
5762 case OFPACT_CT_CLEAR:
5763 case OFPACT_NAT:
5764 /* These may not generate PACKET INs. */
5765 break;
5766
5767 case OFPACT_NOTE:
5768 case OFPACT_CONJUNCTION:
5769 /* These need not be copied for restoration. */
5770 continue;
5771 }
5772 /* Copy the action over. */
5773 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
5774 }
5775 }
5776
5777 static void
5778 put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
5779 struct flow_wildcards *wc)
5780 {
5781 if (wc->masks.ct_mark) {
5782 struct {
5783 uint32_t key;
5784 uint32_t mask;
5785 } *odp_ct_mark;
5786
5787 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
5788 sizeof(*odp_ct_mark));
5789 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
5790 odp_ct_mark->mask = wc->masks.ct_mark;
5791 }
5792 }
5793
5794 static void
5795 put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
5796 struct flow_wildcards *wc)
5797 {
5798 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
5799 struct {
5800 ovs_u128 key;
5801 ovs_u128 mask;
5802 } odp_ct_label;
5803
5804 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
5805 odp_ct_label.mask = wc->masks.ct_label;
5806 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
5807 &odp_ct_label, sizeof odp_ct_label);
5808 }
5809 }
5810
5811 static void
5812 put_ct_helper(struct xlate_ctx *ctx,
5813 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
5814 {
5815 if (ofc->alg) {
5816 switch(ofc->alg) {
5817 case IPPORT_FTP:
5818 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
5819 break;
5820 case IPPORT_TFTP:
5821 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
5822 break;
5823 default:
5824 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
5825 break;
5826 }
5827 }
5828 }
5829
5830 static void
5831 put_ct_nat(struct xlate_ctx *ctx)
5832 {
5833 struct ofpact_nat *ofn = ctx->ct_nat_action;
5834 size_t nat_offset;
5835
5836 if (!ofn) {
5837 return;
5838 }
5839
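/* Example mapping (illustrative): the OpenFlow action
 * ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535,persistent)) yields a
 * nested OVS_CT_ATTR_NAT containing OVS_NAT_ATTR_SRC, the IP_MIN/IP_MAX and
 * PROTO_MIN/PROTO_MAX ranges, and OVS_NAT_ATTR_PERSISTENT. */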
5840 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
5841 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
5842 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
5843 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
5844 if (ofn->flags & NX_NAT_F_PERSISTENT) {
5845 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
5846 }
5847 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
5848 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
5849 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
5850 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
5851 }
5852 if (ofn->range_af == AF_INET) {
5853 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5854 ofn->range.addr.ipv4.min);
5855 if (ofn->range.addr.ipv4.max &&
5856 (ntohl(ofn->range.addr.ipv4.max)
5857 > ntohl(ofn->range.addr.ipv4.min))) {
5858 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5859 ofn->range.addr.ipv4.max);
5860 }
5861 } else if (ofn->range_af == AF_INET6) {
5862 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
5863 &ofn->range.addr.ipv6.min,
5864 sizeof ofn->range.addr.ipv6.min);
5865 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
5866 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
5867 sizeof ofn->range.addr.ipv6.max) > 0) {
5868 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
5869 &ofn->range.addr.ipv6.max,
5870 sizeof ofn->range.addr.ipv6.max);
5871 }
5872 }
5873 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
5874 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
5875 ofn->range.proto.min);
5876 if (ofn->range.proto.max &&
5877 ofn->range.proto.max > ofn->range.proto.min) {
5878 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
5879 ofn->range.proto.max);
5880 }
5881 }
5882 }
5883 nl_msg_end_nested(ctx->odp_actions, nat_offset);
5884 }
5885
5886 static void
5887 compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
5888 bool is_last_action)
5889 {
5890 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
5891 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
5892 size_t ct_offset;
5893 uint16_t zone;
5894
5895 /* Ensure that any prior actions are applied before composing the new
5896 * conntrack action. */
5897 xlate_commit_actions(ctx);
5898
5899 /* Process nested actions first, to populate the key. */
5900 ctx->ct_nat_action = NULL;
5901 ctx->wc->masks.ct_mark = 0;
5902 ctx->wc->masks.ct_label = OVS_U128_ZERO;
5903 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
5904 is_last_action);
5905
5906 if (ofc->zone_src.field) {
5907 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
5908 } else {
5909 zone = ofc->zone_imm;
5910 }
5911
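/* Compose the nested datapath ct() action: commit/force flags and eventmask
 * (when supported), the zone, plus any mark, label, helper and nat
 * sub-attributes derived from the nested OpenFlow actions above. */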
5912 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
5913 if (ofc->flags & NX_CT_F_COMMIT) {
5914 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
5915 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
5916 if (ctx->xbridge->support.ct_eventmask) {
5917 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
5918 OVS_CT_EVENTMASK_DEFAULT);
5919 }
5920 }
5921 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
5922 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5923 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
5924 put_ct_helper(ctx, ctx->odp_actions, ofc);
5925 put_ct_nat(ctx);
5926 ctx->ct_nat_action = NULL;
5927 nl_msg_end_nested(ctx->odp_actions, ct_offset);
5928
5929 ctx->wc->masks.ct_mark = old_ct_mark_mask;
5930 ctx->wc->masks.ct_label = old_ct_label_mask;
5931
5932 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
5933 ctx->conntracked = true;
5934 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
5935 }
5936
5937 /* The ct_* fields are only available in the scope of the 'recirc_table'
5938 * call chain. */
5939 flow_clear_conntrack(&ctx->xin->flow);
5940 xlate_report(ctx, OFT_DETAIL, "Sets the packet to an untracked state, "
5941 "and clears all the conntrack fields.");
5942 ctx->conntracked = false;
5943 }
5944
5945 static void
5946 compose_ct_clear_action(struct xlate_ctx *ctx)
5947 {
5948 clear_conntrack(ctx);
5949 /* This action originally existed without dpif support. So to preserve
5950 * compatibility, only append it if the dpif supports it. */
5951 if (ctx->xbridge->support.ct_clear) {
5952 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
5953 }
5954 }
5955
5956 static void
5957 rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
5958 struct flow *flow,
5959 struct flow_wildcards *wc)
5960 {
5961 wc->masks.packet_type = OVS_BE32_MAX;
5962 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
5963 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
5964 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
5965 flow->packet_type = htonl(PT_ETH);
5966 flow->dl_src = eth_addr_zero;
5967 flow->dl_dst = eth_addr_zero;
5968 flow->dl_type = ethertype;
5969 } else {
5970 /* Error handling: drop packet. */
5971 xlate_report_debug(ctx, OFT_ACTION,
5972 "Dropping packet as encap(ethernet) is not "
5973 "supported for packet type ethernet.");
5974 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
5975 }
5976 }
5977
5978 /* For an MD2 NSH header, returns a pointer to an ofpbuf holding the encoded
5979 * MD2 TLVs provided as encap properties to the encap operation. This buffer
5980 * will be stored as encap_data in the ctx and copied into the push_nsh
5981 * action at the next commit. */
5982 static struct ofpbuf *
5983 rewrite_flow_push_nsh(struct xlate_ctx *ctx,
5984 const struct ofpact_encap *encap,
5985 struct flow *flow,
5986 struct flow_wildcards *wc)
5987 {
5988 ovs_be32 packet_type = flow->packet_type;
5989 const char *ptr = (char *) encap->props;
5990 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
5991 uint8_t md_type = NSH_M_TYPE1;
5992 uint8_t np = 0;
5993 int i;
5994
5995 /* Scan the optional NSH encap TLV properties, if any. */
5996 for (i = 0; i < encap->n_props; i++) {
5997 struct ofpact_ed_prop *prop_ptr =
5998 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
5999 if (prop_ptr->prop_class == OFPPPC_NSH) {
6000 switch (prop_ptr->type) {
6001 case OFPPPT_PROP_NSH_MDTYPE: {
6002 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
6003 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
6004 prop_ptr);
6005 md_type = prop_md_type->md_type;
6006 break;
6007 }
6008 case OFPPPT_PROP_NSH_TLV: {
6009 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
6010 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
6011 prop_ptr);
6012 struct nsh_md2_tlv *md2_ctx =
6013 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
6014 md2_ctx->md_class = tlv_prop->tlv_class;
6015 md2_ctx->type = tlv_prop->tlv_type;
6016 md2_ctx->length = tlv_prop->tlv_len;
6017 size_t len = ROUND_UP(md2_ctx->length, 4);
6018 size_t padding = len - md2_ctx->length;
6019 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
6020 ofpbuf_put_zeros(buf, padding);
6021 break;
6022 }
6023 default:
6024 /* No other NSH encap properties defined yet. */
6025 break;
6026 }
6027 }
6028 ptr += ROUND_UP(prop_ptr->len, 8);
6029 }
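/* Discard the TLV buffer if no MD2 context TLVs were collected or if they
 * exceed the maximum NSH context header length. */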
6030 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
6031 ofpbuf_delete(buf);
6032 buf = NULL;
6033 }
6034
6035 /* Determine the Next Protocol field for NSH header. */
6036 switch (ntohl(packet_type)) {
6037 case PT_ETH:
6038 np = NSH_P_ETHERNET;
6039 break;
6040 case PT_IPV4:
6041 np = NSH_P_IPV4;
6042 break;
6043 case PT_IPV6:
6044 np = NSH_P_IPV6;
6045 break;
6046 case PT_NSH:
6047 np = NSH_P_NSH;
6048 break;
6049 default:
6050 /* Error handling: drop packet. */
6051 xlate_report_debug(ctx, OFT_ACTION,
6052 "Dropping packet as encap(nsh) is not "
6053 "supported for packet type (%d,0x%x)",
6054 pt_ns(packet_type), pt_ns_type(packet_type));
6055 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6056 return buf;
6057 }
6058 /* Note that we have matched on packet_type! */
6059 wc->masks.packet_type = OVS_BE32_MAX;
6060
6061 /* Reset all current flow packet headers. */
6062 memset(&flow->dl_dst, 0,
6063 sizeof(struct flow) - offsetof(struct flow, dl_dst));
6064
6065 /* Populate the flow with the new NSH header. */
6066 flow->packet_type = htonl(PT_NSH);
6067 flow->dl_type = htons(ETH_TYPE_NSH);
6068 flow->nsh.flags = 0;
6069 flow->nsh.ttl = 63;
6070 flow->nsh.np = np;
6071 flow->nsh.path_hdr = htonl(255);
6072
6073 if (md_type == NSH_M_TYPE1) {
6074 flow->nsh.mdtype = NSH_M_TYPE1;
6075 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
6076 if (buf) {
6077 /* Drop any MD2 context TLVs. */
6078 ofpbuf_delete(buf);
6079 buf = NULL;
6080 }
6081 } else if (md_type == NSH_M_TYPE2) {
6082 flow->nsh.mdtype = NSH_M_TYPE2;
6083 }
6084 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
6085
6086 return buf;
6087 }
6088
6089 static void
6090 xlate_generic_encap_action(struct xlate_ctx *ctx,
6091 const struct ofpact_encap *encap)
6092 {
6093 struct flow *flow = &ctx->xin->flow;
6094 struct flow_wildcards *wc = ctx->wc;
6095 struct ofpbuf *encap_data = NULL;
6096
6097 /* Ensure that any pending actions on the inner packet are applied before
6098 * rewriting the flow. */
6099 xlate_commit_actions(ctx);
6100
6101 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
6102 switch (ntohl(encap->new_pkt_type)) {
6103 case PT_ETH:
6104 rewrite_flow_encap_ethernet(ctx, flow, wc);
6105 break;
6106 case PT_NSH:
6107 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
6108 break;
6109 default:
6110 /* New packet type was checked during decoding. */
6111 OVS_NOT_REACHED();
6112 }
6113
6114 if (!ctx->error) {
6115 /* The actual encap datapath action will be generated at next commit. */
6116 ctx->pending_encap = true;
6117 ctx->encap_data = encap_data;
6118 }
6119 }
6120
6121 /* Returns true if packet must be recirculated after decapsulation. */
6122 static bool
6123 xlate_generic_decap_action(struct xlate_ctx *ctx,
6124 const struct ofpact_decap *decap OVS_UNUSED)
6125 {
6126 struct flow *flow = &ctx->xin->flow;
6127
6128 /* Ensure that any pending actions on the current packet are applied
6129 * before generating the decap action. */
6130 xlate_commit_actions(ctx);
6131
6132 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
6133 switch (ntohl(flow->packet_type)) {
6134 case PT_ETH:
6135 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
6136 /* Error handling: drop packet. */
6137 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
6138 "decap Ethernet if VLAN is present.");
6139 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6140 } else {
6141 /* Just change the packet_type.
6142 * Delay generating pop_eth to the next commit. */
6143 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
6144 ntohs(flow->dl_type)));
6145 ctx->wc->masks.dl_type = OVS_BE16_MAX;
6146 }
6147 return false;
6148 case PT_NSH:
6149 /* The pop_nsh action is generated at the commit executed as
6150 * part of freezing the ctx for recirculation. Here we just set
6151 * the new packet type based on the NSH next protocol field. */
6152 switch (flow->nsh.np) {
6153 case NSH_P_ETHERNET:
6154 flow->packet_type = htonl(PT_ETH);
6155 break;
6156 case NSH_P_IPV4:
6157 flow->packet_type = htonl(PT_IPV4);
6158 break;
6159 case NSH_P_IPV6:
6160 flow->packet_type = htonl(PT_IPV6);
6161 break;
6162 case NSH_P_NSH:
6163 flow->packet_type = htonl(PT_NSH);
6164 break;
6165 default:
6166 /* Error handling: drop packet. */
6167 xlate_report_debug(ctx, OFT_ACTION,
6168 "Dropping packet as NSH next protocol %d "
6169 "is not supported", flow->nsh.np);
6170 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6171 return false;
6172 break;
6173 }
6174 ctx->wc->masks.nsh.np = UINT8_MAX;
6175 ctx->pending_decap = true;
6176 /* Trigger recirculation. */
6177 return true;
6178 default:
6179 /* Error handling: drop packet. */
6180 xlate_report_debug(
6181 ctx, OFT_ACTION,
6182 "Dropping packet as the decap() does not support "
6183 "packet type (%d,0x%x)",
6184 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
6185 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6186 return false;
6187 }
6188 }
6189
6190 static void
6191 recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
6192 {
6193 /* No need to recirculate if already exiting. */
6194 if (ctx->exit) {
6195 return;
6196 }
6197
6198 /* Do not consider recirculating unless the packet was previously MPLS. */
6199 if (!ctx->was_mpls) {
6200 return;
6201 }
6202
6203 /* Special case these actions, only recirculating if necessary.
6204 * This avoids the overhead of recirculation in common use-cases.
6205 */
6206 switch (a->type) {
6207
6208 /* Output actions do not require recirculation. */
6209 case OFPACT_OUTPUT:
6210 case OFPACT_OUTPUT_TRUNC:
6211 case OFPACT_ENQUEUE:
6212 case OFPACT_OUTPUT_REG:
6213 /* Set actions that don't touch L3+ fields do not require recirculation. */
6214 case OFPACT_SET_VLAN_VID:
6215 case OFPACT_SET_VLAN_PCP:
6216 case OFPACT_SET_ETH_SRC:
6217 case OFPACT_SET_ETH_DST:
6218 case OFPACT_SET_TUNNEL:
6219 case OFPACT_SET_QUEUE:
6220 /* If the actions of a group require recirculation, that can be detected
6221 * when translating them. */
6222 case OFPACT_GROUP:
6223 return;
6224
6225 /* A set-field action that doesn't touch L3+ fields doesn't require recirculation. */
6226 case OFPACT_SET_FIELD:
6227 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6228 break;
6229 }
6230 return;
6231
6232 /* For simplicity, recirculate in all other cases. */
6233 case OFPACT_CONTROLLER:
6234 case OFPACT_BUNDLE:
6235 case OFPACT_STRIP_VLAN:
6236 case OFPACT_PUSH_VLAN:
6237 case OFPACT_SET_IPV4_SRC:
6238 case OFPACT_SET_IPV4_DST:
6239 case OFPACT_SET_IP_DSCP:
6240 case OFPACT_SET_IP_ECN:
6241 case OFPACT_SET_IP_TTL:
6242 case OFPACT_SET_L4_SRC_PORT:
6243 case OFPACT_SET_L4_DST_PORT:
6244 case OFPACT_REG_MOVE:
6245 case OFPACT_STACK_PUSH:
6246 case OFPACT_STACK_POP:
6247 case OFPACT_DEC_TTL:
6248 case OFPACT_SET_MPLS_LABEL:
6249 case OFPACT_SET_MPLS_TC:
6250 case OFPACT_SET_MPLS_TTL:
6251 case OFPACT_DEC_MPLS_TTL:
6252 case OFPACT_PUSH_MPLS:
6253 case OFPACT_POP_MPLS:
6254 case OFPACT_POP_QUEUE:
6255 case OFPACT_FIN_TIMEOUT:
6256 case OFPACT_RESUBMIT:
6257 case OFPACT_LEARN:
6258 case OFPACT_CONJUNCTION:
6259 case OFPACT_MULTIPATH:
6260 case OFPACT_NOTE:
6261 case OFPACT_EXIT:
6262 case OFPACT_SAMPLE:
6263 case OFPACT_CLONE:
6264 case OFPACT_ENCAP:
6265 case OFPACT_DECAP:
6266 case OFPACT_DEC_NSH_TTL:
6267 case OFPACT_UNROLL_XLATE:
6268 case OFPACT_CT:
6269 case OFPACT_CT_CLEAR:
6270 case OFPACT_NAT:
6271 case OFPACT_DEBUG_RECIRC:
6272 case OFPACT_DEBUG_SLOW:
6273 case OFPACT_METER:
6274 case OFPACT_CLEAR_ACTIONS:
6275 case OFPACT_WRITE_ACTIONS:
6276 case OFPACT_WRITE_METADATA:
6277 case OFPACT_GOTO_TABLE:
6278 default:
6279 break;
6280 }
6281
6282 /* Recirculate */
6283 ctx_trigger_freeze(ctx);
6284 }
6285
6286 static void
6287 xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6288 {
6289 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6290 xlate_report_subfield(ctx, &a->dst);
6291 }
6292
6293 static void
6294 xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6295 {
6296 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6297 xlate_report_subfield(ctx, &a->subfield);
6298 } else {
6299 xlate_report_error(ctx, "stack underflow");
6300 }
6301 }
6302
6303 /* Restore translation context data that was stored earlier. */
6304 static void
6305 xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6306 const struct ofpact_unroll_xlate *a)
6307 {
6308 ctx->table_id = a->rule_table_id;
6309 ctx->rule_cookie = a->rule_cookie;
6310 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6311 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6312 }
6313
6314 static void
6315 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6316 struct xlate_ctx *ctx, bool is_last_action)
6317 {
6318 struct flow_wildcards *wc = ctx->wc;
6319 struct flow *flow = &ctx->xin->flow;
6320 const struct ofpact *a;
6321
6322 /* dl_type already in the mask, not set below. */
6323
6324 if (!ofpacts_len) {
6325 xlate_report(ctx, OFT_ACTION, "drop");
6326 return;
6327 }
6328
6329 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6330 struct ofpact_controller *controller;
6331 const struct ofpact_metadata *metadata;
6332 const struct ofpact_set_field *set_field;
6333 const struct mf_field *mf;
6334 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6335 && ctx->action_set.size;
6336
6337 if (ctx->error) {
6338 break;
6339 }
6340
6341 recirc_for_mpls(a, ctx);
6342
6343 if (ctx->exit) {
6344 /* Check if need to store the remaining actions for later
6345 * execution. */
6346 if (ctx->freezing) {
6347 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
6348 ctx);
6349 }
6350 break;
6351 }
6352
6353 if (OVS_UNLIKELY(ctx->xin->trace)) {
6354 struct ds s = DS_EMPTY_INITIALIZER;
6355 struct ofpact_format_params fp = { .s = &s };
6356 ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
6357 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6358 ds_destroy(&s);
6359 }
6360
6361 switch (a->type) {
6362 case OFPACT_OUTPUT:
6363 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
6364 ofpact_get_OUTPUT(a)->max_len, true, last,
6365 false);
6366 break;
6367
6368 case OFPACT_GROUP:
6369 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
6370 /* Group could not be found. */
6371
6372 /* XXX: Terminates action list translation, but does not
6373 * terminate the pipeline. */
6374 return;
6375 }
6376 break;
6377
6378 case OFPACT_CONTROLLER:
6379 controller = ofpact_get_CONTROLLER(a);
6380 if (controller->pause) {
6381 ctx->pause = controller;
6382 ctx_trigger_freeze(ctx);
6383 a = ofpact_next(a);
6384 } else {
6385 xlate_controller_action(ctx, controller->max_len,
6386 controller->reason,
6387 controller->controller_id,
6388 controller->userdata,
6389 controller->userdata_len);
6390 }
6391 break;
6392
6393 case OFPACT_ENQUEUE:
6394 memset(&wc->masks.skb_priority, 0xff,
6395 sizeof wc->masks.skb_priority);
6396 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last);
6397 break;
6398
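/* SET_VLAN_VID/SET_VLAN_PCP: with 'push_vlan_if_needed' (corresponding to
 * OpenFlow 1.0 mod_vlan_vid/mod_vlan_pcp semantics) a VLAN tag is implicitly
 * pushed if none is present; otherwise the action only modifies an existing
 * tag. */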
6399 case OFPACT_SET_VLAN_VID:
6400 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6401 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6402 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
6403 if (!flow->vlans[0].tpid) {
6404 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6405 }
6406 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6407 flow->vlans[0].tci |=
6408 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6409 htons(VLAN_CFI));
6410 }
6411 break;
6412
6413 case OFPACT_SET_VLAN_PCP:
6414 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6415 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6416 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
6417 if (!flow->vlans[0].tpid) {
6418 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6419 }
6420 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6421 flow->vlans[0].tci |=
6422 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6423 << VLAN_PCP_SHIFT) | VLAN_CFI);
6424 }
6425 break;
6426
6427 case OFPACT_STRIP_VLAN:
6428 flow_pop_vlan(flow, wc);
6429 break;
6430
6431 case OFPACT_PUSH_VLAN:
6432 flow_push_vlan_uninit(flow, wc);
6433 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6434 flow->vlans[0].tci = htons(VLAN_CFI);
6435 break;
6436
6437 case OFPACT_SET_ETH_SRC:
6438 WC_MASK_FIELD(wc, dl_src);
6439 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
6440 break;
6441
6442 case OFPACT_SET_ETH_DST:
6443 WC_MASK_FIELD(wc, dl_dst);
6444 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
6445 break;
6446
6447 case OFPACT_SET_IPV4_SRC:
6448 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6449 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
6450 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6451 }
6452 break;
6453
6454 case OFPACT_SET_IPV4_DST:
6455 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6456 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
6457 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6458 }
6459 break;
6460
6461 case OFPACT_SET_IP_DSCP:
6462 if (is_ip_any(flow)) {
6463 wc->masks.nw_tos |= IP_DSCP_MASK;
6464 flow->nw_tos &= ~IP_DSCP_MASK;
6465 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
6466 }
6467 break;
6468
6469 case OFPACT_SET_IP_ECN:
6470 if (is_ip_any(flow)) {
6471 wc->masks.nw_tos |= IP_ECN_MASK;
6472 flow->nw_tos &= ~IP_ECN_MASK;
6473 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6474 }
6475 break;
6476
6477 case OFPACT_SET_IP_TTL:
6478 if (is_ip_any(flow)) {
6479 wc->masks.nw_ttl = 0xff;
6480 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6481 }
6482 break;
6483
6484 case OFPACT_SET_L4_SRC_PORT:
6485 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6486 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6487 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
6488 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6489 }
6490 break;
6491
6492 case OFPACT_SET_L4_DST_PORT:
6493 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6494 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6495 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
6496 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6497 }
6498 break;
6499
6500 case OFPACT_RESUBMIT:
6501 /* Freezing complicates resubmit. Some action in the flow
6502 * entry found by resubmit might trigger freezing. If that
6503 * happens, then we do not want to execute the resubmit again
6504 * during thawing, so we skip back to the head of the loop
6505 * to avoid that, adding only the actions that follow the resubmit
6506 * to the frozen actions.
6507 */
6508 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6509 continue;
6510
6511 case OFPACT_SET_TUNNEL:
6512 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
6513 break;
6514
6515 case OFPACT_SET_QUEUE:
6516 memset(&wc->masks.skb_priority, 0xff,
6517 sizeof wc->masks.skb_priority);
6518 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6519 break;
6520
6521 case OFPACT_POP_QUEUE:
6522 memset(&wc->masks.skb_priority, 0xff,
6523 sizeof wc->masks.skb_priority);
6524 if (flow->skb_priority != ctx->orig_skb_priority) {
6525 flow->skb_priority = ctx->orig_skb_priority;
6526 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6527 flow->skb_priority);
6528 }
6529 break;
6530
6531 case OFPACT_REG_MOVE:
6532 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
6533 break;
6534
6535 case OFPACT_SET_FIELD:
6536 set_field = ofpact_get_SET_FIELD(a);
6537 mf = set_field->field;
6538
6539 /* Set the field only if the packet actually has it. */
6540 if (mf_are_prereqs_ok(mf, flow, wc)) {
6541 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6542 mf_set_flow_value_masked(mf, set_field->value,
6543 ofpact_set_field_mask(set_field),
6544 flow);
6545 } else {
6546 xlate_report(ctx, OFT_WARN,
6547 "unmet prerequisites for %s, set_field ignored",
6548 mf->name);
6549
6550 }
6551 break;
6552
6553 case OFPACT_STACK_PUSH:
6554 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6555 &ctx->stack);
6556 break;
6557
6558 case OFPACT_STACK_POP:
6559 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
6560 break;
6561
6562 case OFPACT_PUSH_MPLS:
6563 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
6564 break;
6565
6566 case OFPACT_POP_MPLS:
6567 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6568 break;
6569
6570 case OFPACT_SET_MPLS_LABEL:
6571 compose_set_mpls_label_action(
6572 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
6573 break;
6574
6575 case OFPACT_SET_MPLS_TC:
6576 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
6577 break;
6578
6579 case OFPACT_SET_MPLS_TTL:
6580 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
6581 break;
6582
6583 case OFPACT_DEC_MPLS_TTL:
6584 if (compose_dec_mpls_ttl_action(ctx)) {
6585 return;
6586 }
6587 break;
6588
6589 case OFPACT_DEC_NSH_TTL:
6590 if (compose_dec_nsh_ttl_action(ctx)) {
6591 return;
6592 }
6593 break;
6594
6595 case OFPACT_DEC_TTL:
6596 wc->masks.nw_ttl = 0xff;
6597 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
6598 return;
6599 }
6600 break;
6601
6602 case OFPACT_NOTE:
6603 /* Nothing to do. */
6604 break;
6605
6606 case OFPACT_MULTIPATH:
6607 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
6608 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
6609 break;
6610
6611 case OFPACT_BUNDLE:
6612 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last);
6613 break;
6614
6615 case OFPACT_OUTPUT_REG:
6616 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last);
6617 break;
6618
6619 case OFPACT_OUTPUT_TRUNC:
6620 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
6621 ofpact_get_OUTPUT_TRUNC(a)->max_len, last);
6622 break;
6623
6624 case OFPACT_LEARN:
6625 xlate_learn_action(ctx, ofpact_get_LEARN(a));
6626 break;
6627
6628 case OFPACT_CONJUNCTION:
6629 /* A flow with a "conjunction" action represents part of a special
6630 * kind of "set membership match". Such a flow should not actually
6631 * get executed, but it could via, say, a "packet-out", even though
6632 * that wouldn't be useful. Log it to help debugging. */
6633 xlate_report_error(ctx, "executing no-op conjunction action");
6634 break;
6635
6636 case OFPACT_EXIT:
6637 ctx->exit = true;
6638 break;
6639
6640 case OFPACT_UNROLL_XLATE:
6641 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
6642 break;
6643
6644 case OFPACT_FIN_TIMEOUT:
6645 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6646 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
6647 break;
6648
6649 case OFPACT_CLEAR_ACTIONS:
6650 xlate_report_action_set(ctx, "was");
6651 ofpbuf_clear(&ctx->action_set);
6652 ctx->xin->flow.actset_output = OFPP_UNSET;
6653 ctx->action_set_has_group = false;
6654 break;
6655
6656 case OFPACT_WRITE_ACTIONS:
6657 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
6658 xlate_report_action_set(ctx, "is");
6659 break;
6660
6661 case OFPACT_WRITE_METADATA:
6662 metadata = ofpact_get_WRITE_METADATA(a);
6663 flow->metadata &= ~metadata->mask;
6664 flow->metadata |= metadata->metadata & metadata->mask;
6665 break;
6666
6667 case OFPACT_METER:
6668 xlate_meter_action(ctx, ofpact_get_METER(a));
6669 break;
6670
6671 case OFPACT_GOTO_TABLE: {
6672 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
6673
6674 ovs_assert(ctx->table_id < ogt->table_id);
6675
6676 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
6677 ogt->table_id, true, true, false, last,
6678 do_xlate_actions);
6679 break;
6680 }
6681
6682 case OFPACT_SAMPLE:
6683 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
6684 break;
6685
6686 case OFPACT_CLONE:
6687 compose_clone(ctx, ofpact_get_CLONE(a), last);
6688 break;
6689
6690 case OFPACT_ENCAP:
6691 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
6692 break;
6693
6694 case OFPACT_DECAP: {
6695 bool recirc_needed =
6696 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
6697 if (!ctx->error && recirc_needed) {
6698 /* Recirculate for parsing of inner packet. */
6699 ctx_trigger_freeze(ctx);
6700 /* Then continue with next action. */
6701 a = ofpact_next(a);
6702 }
6703 break;
6704 }
6705
6706 case OFPACT_CT:
6707 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
6708 break;
6709
6710 case OFPACT_CT_CLEAR:
6711 compose_ct_clear_action(ctx);
6712 break;
6713
6714 case OFPACT_NAT:
6715 /* This will be processed by compose_conntrack_action(). */
6716 ctx->ct_nat_action = ofpact_get_NAT(a);
6717 break;
6718
6719 case OFPACT_DEBUG_RECIRC:
6720 ctx_trigger_freeze(ctx);
6721 a = ofpact_next(a);
6722 break;
6723
6724 case OFPACT_DEBUG_SLOW:
6725 ctx->xout->slow |= SLOW_ACTION;
6726 break;
6727 }
6728
6729 /* Check if need to store this and the remaining actions for later
6730 * execution. */
6731 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
6732 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
6733 break;
6734 }
6735 }
6736 }
6737
6738 void
6739 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
6740 ovs_version_t version, const struct flow *flow,
6741 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
6742 const struct dp_packet *packet, struct flow_wildcards *wc,
6743 struct ofpbuf *odp_actions)
6744 {
6745 xin->ofproto = ofproto;
6746 xin->tables_version = version;
6747 xin->flow = *flow;
6748 xin->upcall_flow = flow;
6749 xin->flow.in_port.ofp_port = in_port;
6750 xin->flow.actset_output = OFPP_UNSET;
6751 xin->packet = packet;
6752 xin->allow_side_effects = packet != NULL;
6753 xin->rule = rule;
6754 xin->xcache = NULL;
6755 xin->ofpacts = NULL;
6756 xin->ofpacts_len = 0;
6757 xin->tcp_flags = tcp_flags;
6758 xin->trace = NULL;
6759 xin->resubmit_stats = NULL;
6760 xin->depth = 0;
6761 xin->resubmits = 0;
6762 xin->wc = wc;
6763 xin->odp_actions = odp_actions;
6764 xin->in_packet_out = false;
6765 xin->recirc_queue = NULL;
6766 xin->xport_uuid = UUID_ZERO;
6767
6768 /* Do recirc lookup. */
6769 xin->frozen_state = NULL;
6770 if (flow->recirc_id) {
6771 const struct recirc_id_node *node
6772 = recirc_id_node_find(flow->recirc_id);
6773 if (node) {
6774 xin->frozen_state = &node->state;
6775 }
6776 }
6777 }
6778
6779 void
6780 xlate_out_uninit(struct xlate_out *xout)
6781 {
6782 if (xout) {
6783 recirc_refs_unref(&xout->recircs);
6784 }
6785 }
6786 \f
6787 static struct skb_priority_to_dscp *
6788 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
6789 {
6790 struct skb_priority_to_dscp *pdscp;
6791 uint32_t hash;
6792
6793 hash = hash_int(skb_priority, 0);
6794 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
6795 if (pdscp->skb_priority == skb_priority) {
6796 return pdscp;
6797 }
6798 }
6799 return NULL;
6800 }
6801
6802 static bool
6803 dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
6804 uint8_t *dscp)
6805 {
6806 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
6807 *dscp = pdscp ? pdscp->dscp : 0;
6808 return pdscp != NULL;
6809 }
6810
6811 static size_t
6812 count_skb_priorities(const struct xport *xport)
6813 {
6814 return hmap_count(&xport->skb_priorities);
6815 }
6816
6817 static void
6818 clear_skb_priorities(struct xport *xport)
6819 {
6820 struct skb_priority_to_dscp *pdscp;
6821
6822 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
6823 free(pdscp);
6824 }
6825 }
6826
6827 static bool
6828 actions_output_to_local_port(const struct xlate_ctx *ctx)
6829 {
6830 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
6831 const struct nlattr *a;
6832 unsigned int left;
6833
6834 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
6835 ctx->odp_actions->size) {
6836 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
6837 && nl_attr_get_odp_port(a) == local_odp_port) {
6838 return true;
6839 }
6840 }
6841 return false;
6842 }
6843
6844 #if defined(__linux__)
6845 /* Returns the maximum number of packets that the Linux kernel is willing to
6846 * queue up internally to certain kinds of software-implemented ports, or the
6847 * default (and rarely modified) value if it cannot be determined. */
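/* (On Linux the current limit can be inspected or tuned with, e.g.,
 * "sysctl net.core.netdev_max_backlog".) */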
6848 static int
6849 netdev_max_backlog(void)
6850 {
6851 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
6852 static int max_backlog = 1000; /* The normal default value. */
6853
6854 if (ovsthread_once_start(&once)) {
6855 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
6856 FILE *stream;
6857 int n;
6858
6859 stream = fopen(filename, "r");
6860 if (!stream) {
6861 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
6862 } else {
6863 if (fscanf(stream, "%d", &n) != 1) {
6864 VLOG_WARN("%s: read error", filename);
6865 } else if (n <= 100) {
6866 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
6867 } else {
6868 max_backlog = n;
6869 }
6870 fclose(stream);
6871 }
6872 ovsthread_once_done(&once);
6873
6874 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
6875 }
6876
6877 return max_backlog;
6878 }
6879
6880 /* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
6881 * 'odp_actions'. */
6882 static int
6883 count_output_actions(const struct ofpbuf *odp_actions)
6884 {
6885 const struct nlattr *a;
6886 size_t left;
6887 int n = 0;
6888
6889 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
6890 if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
6891 n++;
6892 }
6893 }
6894 return n;
6895 }
6896 #endif /* defined(__linux__) */
6897
6898 /* Returns true if 'odp_actions' contains more output actions than the datapath
6899 * can reliably handle in one go. On Linux, this is the value of the
6900 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
6901 * packets that the kernel is willing to queue up for processing while the
6902 * datapath is processing a set of actions. */
6903 static bool
6904 too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
6905 {
6906 #ifdef __linux__
6907 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
6908 && count_output_actions(odp_actions) > netdev_max_backlog());
6909 #else
6910 /* OSes other than Linux might have similar limits, but we don't know how
6911 * to determine them. */
6912 return false;
6913 #endif
6914 }
6915
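/* Initializes 'ctx->wc' for a translation: starts from a catch-all mask and
 * then unwildcards the fields that every translation examines (packet type,
 * input port, Ethernet type for Ethernet packets, IP fragmentation bits),
 * plus recirc_id when the datapath supports recirculation and any fields
 * required for NetFlow or tunneling. */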
6916 static void
6917 xlate_wc_init(struct xlate_ctx *ctx)
6918 {
6919 flow_wildcards_init_catchall(ctx->wc);
6920
6921 /* Some fields we consider to always be examined. */
6922 WC_MASK_FIELD(ctx->wc, packet_type);
6923 WC_MASK_FIELD(ctx->wc, in_port);
6924 if (is_ethernet(&ctx->xin->flow, NULL)) {
6925 WC_MASK_FIELD(ctx->wc, dl_type);
6926 }
6927 if (is_ip_any(&ctx->xin->flow)) {
6928 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
6929 }
6930
6931 if (ctx->xbridge->support.odp.recirc) {
6932 /* Always exactly match recirc_id when the datapath supports
6933 * recirculation. */
6934 WC_MASK_FIELD(ctx->wc, recirc_id);
6935 }
6936
6937 if (ctx->xbridge->netflow) {
6938 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
6939 }
6940
6941 tnl_wc_init(&ctx->xin->flow, ctx->wc);
6942 }
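/* Post-processes 'ctx->wc' once translation is complete: clears masks that
 * have no meaning in a datapath flow (metadata and registers, Ethernet fields
 * of non-Ethernet packets, the high bits of ICMP type and code, L4 ports of
 * later fragments) and forces the VLAN CFI bit to be matched whenever any
 * other TCI bit is matched. */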
6943
6944 static void
6945 xlate_wc_finish(struct xlate_ctx *ctx)
6946 {
6947 int i;
6948
6949 /* Clear the metadata and register wildcard masks, because we won't
6950 * use non-header fields as part of the cache. */
6951 flow_wildcards_clear_non_packet_fields(ctx->wc);
6952
6953 /* Wildcard Ethernet fields if the original packet type was not
6954 * Ethernet. */
6955 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
6956 ctx->wc->masks.dl_dst = eth_addr_zero;
6957 ctx->wc->masks.dl_src = eth_addr_zero;
6958 ctx->wc->masks.dl_type = 0;
6959 }
6960
6961 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
6962 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
6963 * represent these fields. The datapath interface, on the other hand,
6964 * represents them with just 8 bits each. This means that if the high
6965 * 8 bits of the masks for these fields somehow become set, then they
6966 * will get chopped off by a round trip through the datapath, and
6967 * revalidation will spot that as an inconsistency and delete the flow.
6968 * Avoid the problem here by making sure that only the low 8 bits of
6969 * either field can be unwildcarded for ICMP.
6970 */
6971 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
6972 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
6973 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
6974 }
6975 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
6976 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
6977 if (ctx->wc->masks.vlans[i].tci) {
6978 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
6979 }
6980 }
6981
6982 /* The classifier might return masks that match on tp_src and tp_dst even
6983 * for later fragments. This happens because there might be flows that
6984 * match on tp_src or tp_dst without matching on the frag bits, because
6985 * that is not a prerequisite in OpenFlow. It is a prerequisite for
6986 * datapath flows, though, and since tp_src and tp_dst are always going
6987 * to be 0 for later fragments, wildcard the fields here. */
6988 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
6989 ctx->wc->masks.tp_src = 0;
6990 ctx->wc->masks.tp_dst = 0;
6991 }
6992 }
6993
6994 /* Translates the flow, actions, or rule in 'xin' into datapath actions in
6995 * 'xout'.
6996 * The caller must take responsibility for eventually freeing 'xout', with
6997 * xlate_out_uninit().
6998 * Returns 'XLATE_OK' if translation was successful. In case of an error,
6999 * an empty set of actions will be returned in 'xin->odp_actions' (if
7000 * non-NULL), so that most callers may ignore the return value and
7001 * transparently install a drop flow when translation fails. */
7002 enum xlate_error
7003 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
7004 {
7005 *xout = (struct xlate_out) {
7006 .slow = 0,
7007 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
7008 };
7009
7010 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7011 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
7012 if (!xbridge) {
7013 return XLATE_BRIDGE_NOT_FOUND;
7014 }
7015
7016 struct flow *flow = &xin->flow;
7017
7018 uint8_t stack_stub[1024];
7019 uint64_t action_set_stub[1024 / 8];
7020 uint64_t frozen_actions_stub[1024 / 8];
7021 uint64_t actions_stub[256 / 8];
7022 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
7023 struct xlate_ctx ctx = {
7024 .xin = xin,
7025 .xout = xout,
7026 .base_flow = *flow,
7027 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
7028 .xcfg = xcfg,
7029 .xbridge = xbridge,
7030 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
7031 .rule = xin->rule,
7032 .wc = (xin->wc
7033 ? xin->wc
7034 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
7035 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
7036
7037 .depth = xin->depth,
7038 .resubmits = xin->resubmits,
7039 .in_group = false,
7040 .in_action_set = false,
7041 .in_packet_out = xin->in_packet_out,
7042 .pending_encap = false,
7043 .pending_decap = false,
7044 .encap_data = NULL,
7045
7046 .table_id = 0,
7047 .rule_cookie = OVS_BE64_MAX,
7048 .orig_skb_priority = flow->skb_priority,
7049 .sflow_n_outputs = 0,
7050 .sflow_odp_port = 0,
7051 .nf_output_iface = NF_OUT_DROP,
7052 .exit = false,
7053 .error = XLATE_OK,
7054 .mirrors = 0,
7055
7056 .freezing = false,
7057 .recirc_update_dp_hash = false,
7058 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
7059 .pause = NULL,
7060
7061 .was_mpls = false,
7062 .conntracked = false,
7063
7064 .ct_nat_action = NULL,
7065
7066 .action_set_has_group = false,
7067 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
7068 };
7069
7070 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
7071 * the packet as the datapath will treat it for output actions. Our
7072 * datapath doesn't retain tunneling information without us re-setting
7073 * it, so clear the tunnel data.
7074 */
7075
7076 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
7077
7078 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
7079 xlate_wc_init(&ctx);
7080
7081 COVERAGE_INC(xlate_actions);
7082
7083 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
7084 xbridge->name);
7085 if (xin->frozen_state) {
7086 const struct frozen_state *state = xin->frozen_state;
7087
7088 struct ovs_list *old_trace = xin->trace;
7089 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
7090
7091 if (xin->ofpacts_len > 0 || ctx.rule) {
7092 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
7093 xin->ofpacts_len ? "actions" : "rule");
7094 ctx.error = XLATE_RECIRCULATION_CONFLICT;
7095 goto exit;
7096 }
7097
7098 /* Set the bridge for post-recirculation processing if needed. */
7099 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
7100 const struct xbridge *new_bridge
7101 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
7102
7103 if (OVS_UNLIKELY(!new_bridge)) {
7104 /* Drop the packet if the bridge cannot be found. */
7105 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
7106 ctx.error = XLATE_BRIDGE_NOT_FOUND;
7107 xin->trace = old_trace;
7108 goto exit;
7109 }
7110 ctx.xbridge = new_bridge;
7111 /* The bridge is now known so obtain its table version. */
7112 ctx.xin->tables_version
7113 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
7114 }
7115
7116 /* Set the thawed table id. Note: A table lookup is done only if there
7117 * are no frozen actions. */
7118 ctx.table_id = state->table_id;
7119 xlate_report(&ctx, OFT_THAW,
7120 "Resuming from table %"PRIu8, ctx.table_id);
7121
7122 ctx.conntracked = state->conntracked;
7123 if (!state->conntracked) {
7124 clear_conntrack(&ctx);
7125 }
7126
7127 /* Restore pipeline metadata. May change flow's in_port and other
7128 * metadata to the values that existed when freezing was triggered. */
7129 frozen_metadata_to_flow(&state->metadata, flow);
7130
7131 /* Restore stack, if any. */
7132 if (state->stack) {
7133 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
7134 }
7135
7136 /* Restore mirror state. */
7137 ctx.mirrors = state->mirrors;
7138
7139 /* Restore action set, if any. */
7140 if (state->action_set_len) {
7141 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
7142 state->action_set, state->action_set_len);
7143
7144 flow->actset_output = OFPP_UNSET;
7145 xlate_write_actions__(&ctx, state->action_set,
7146 state->action_set_len);
7147 }
7148
7149 /* Restore frozen actions. If there are no actions, processing will
7150 * start with a lookup in the table set above. */
7151 xin->ofpacts = state->ofpacts;
7152 xin->ofpacts_len = state->ofpacts_len;
7153 if (state->ofpacts_len) {
7154 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
7155 xin->ofpacts, xin->ofpacts_len);
7156 }
7157
7158 xin->trace = old_trace;
7159 } else if (OVS_UNLIKELY(flow->recirc_id)) {
7160 xlate_report_error(&ctx,
7161 "Recirculation context not found for ID %"PRIx32,
7162 flow->recirc_id);
7163 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
7164 goto exit;
7165 }
7166
7167 /* Tunnel metadata in udpif format must be normalized before translation. */
7168 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7169 const struct tun_table *tun_tab = ofproto_get_tun_tab(
7170 &ctx.xbridge->ofproto->up);
7171 int err;
7172
7173 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
7174 &xin->upcall_flow->tunnel,
7175 &flow->tunnel);
7176 if (err) {
7177 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
7178 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7179 goto exit;
7180 }
7181 } else if (!flow->tunnel.metadata.tab || xin->frozen_state) {
7182 /* If the original flow did not come in on a tunnel, then it won't have
7183 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7184 * table in case we generate tunnel actions. */
7185 /* If the translation is from a frozen state, we use the latest
7186 * TLV map to avoid a segmentation fault in case the old TLV map has
7187 * been replaced by a new one.
7188 * XXX: It would be better to abort translation if the table has changed. */
7189 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7190 &ctx.xbridge->ofproto->up);
7191 }
7192 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7193
7194 /* Get the proximate input port of the packet. (If xin->frozen_state,
7195 * flow->in_port is the ultimate input port of the packet.) */
7196 struct xport *in_port = get_ofp_port(xbridge,
7197 ctx.base_flow.in_port.ofp_port);
7198 if (in_port && !in_port->peer) {
7199 ctx.xin->xport_uuid = in_port->uuid;
7200 }
7201
7202 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7203 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
7204 /* Add a dummy Ethernet header to a non-L2 packet if it is coming from
7205 * an L3 port, so that all packets are L2 packets for the lookup.
7206 * The dl_type has already been set from the packet_type. */
7207 flow->packet_type = htonl(PT_ETH);
7208 flow->dl_src = eth_addr_zero;
7209 flow->dl_dst = eth_addr_zero;
7210 ctx.pending_encap = true;
7211 }
7212
7213 if (!xin->ofpacts && !ctx.rule) {
7214 ctx.rule = rule_dpif_lookup_from_table(
7215 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
7216 ctx.xin->resubmit_stats, &ctx.table_id,
7217 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
7218 if (ctx.xin->resubmit_stats) {
7219 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
7220 }
7221 if (ctx.xin->xcache) {
7222 struct xc_entry *entry;
7223
7224 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
7225 entry->rule = ctx.rule;
7226 ofproto_rule_ref(&ctx.rule->up);
7227 }
7228
7229 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
7230 }
7231
7232 /* Update tunnel stats only for packets that are not thawed. */
7233 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
7234 if (ctx.xin->resubmit_stats) {
7235 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7236 if (in_port->bfd) {
7237 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7238 }
7239 }
7240 if (ctx.xin->xcache) {
7241 struct xc_entry *entry;
7242
7243 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
7244 entry->dev.rx = netdev_ref(in_port->netdev);
7245 entry->dev.bfd = bfd_ref(in_port->bfd);
7246 }
7247 }
7248
7249 if (!xin->frozen_state && process_special(&ctx, in_port)) {
7250 /* process_special() did all the processing for this packet.
7251 *
7252 * We do not perform special processing on thawed packets, since that
7253 * was done before they were frozen and should not be redone. */
7254 mirror_ingress_packet(&ctx);
7255 } else if (in_port && in_port->xbundle
7256 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
7257 xlate_report_error(&ctx, "dropping packet received on port "
7258 "%s, which is reserved exclusively for mirroring",
7259 in_port->xbundle->name);
7260 } else {
7261 /* Sampling is done on initial reception; don't redo after thawing. */
7262 unsigned int user_cookie_offset = 0;
7263 if (!xin->frozen_state) {
7264 user_cookie_offset = compose_sflow_action(&ctx);
7265 compose_ipfix_action(&ctx, ODPP_NONE);
7266 }
7267 size_t sample_actions_len = ctx.odp_actions->size;
7268
7269 if (tnl_process_ecn(flow)
7270 && (!in_port || may_receive(in_port, &ctx))) {
7271 const struct ofpact *ofpacts;
7272 size_t ofpacts_len;
7273
7274 if (xin->ofpacts) {
7275 ofpacts = xin->ofpacts;
7276 ofpacts_len = xin->ofpacts_len;
7277 } else if (ctx.rule) {
7278 const struct rule_actions *actions
7279 = rule_get_actions(&ctx.rule->up);
7280 ofpacts = actions->ofpacts;
7281 ofpacts_len = actions->ofpacts_len;
7282 ctx.rule_cookie = ctx.rule->up.flow_cookie;
7283 } else {
7284 OVS_NOT_REACHED();
7285 }
7286
7287 mirror_ingress_packet(&ctx);
7288 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true);
7289 if (ctx.error) {
7290 goto exit;
7291 }
7292
7293 /* We've let OFPP_NORMAL and the learning action look at the
7294 * packet, so cancel all actions and freezing if forwarding is
7295 * disabled. */
7296 if (in_port && (!xport_stp_forward_state(in_port) ||
7297 !xport_rstp_forward_state(in_port))) {
7298 ctx.odp_actions->size = sample_actions_len;
7299 ctx_cancel_freeze(&ctx);
7300 ofpbuf_clear(&ctx.action_set);
7301 }
7302
7303 if (!ctx.freezing) {
7304 xlate_action_set(&ctx);
7305 }
7306 if (ctx.freezing) {
7307 finish_freezing(&ctx);
7308 }
7309 }
7310
7311 /* Output only fully processed packets. */
7312 if (!ctx.freezing
7313 && xbridge->has_in_band
7314 && in_band_must_output_to_local_port(flow)
7315 && !actions_output_to_local_port(&ctx)) {
7316 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
7317 }
7318
7319 if (user_cookie_offset) {
7320 fix_sflow_action(&ctx, user_cookie_offset);
7321 }
7322 }
7323
7324 if (nl_attr_oversized(ctx.odp_actions->size)) {
7325 /* These datapath actions are too big for a Netlink attribute, so we
7326 * can't hand them to the kernel directly. dpif_execute() can execute
7327 * them one by one with help, so just mark the result as SLOW_ACTION to
7328 * prevent the flow from being installed. */
7329 COVERAGE_INC(xlate_actions_oversize);
7330 ctx.xout->slow |= SLOW_ACTION;
7331 } else if (too_many_output_actions(ctx.odp_actions)) {
7332 COVERAGE_INC(xlate_actions_too_many_output);
7333 ctx.xout->slow |= SLOW_ACTION;
7334 }
7335
7336 /* Update NetFlow for non-frozen traffic. */
7337 if (xbridge->netflow && !xin->frozen_state) {
7338 if (ctx.xin->resubmit_stats) {
7339 netflow_flow_update(xbridge->netflow, flow,
7340 ctx.nf_output_iface,
7341 ctx.xin->resubmit_stats);
7342 }
7343 if (ctx.xin->xcache) {
7344 struct xc_entry *entry;
7345
7346 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
7347 entry->nf.netflow = netflow_ref(xbridge->netflow);
7348 entry->nf.flow = xmemdup(flow, sizeof *flow);
7349 entry->nf.iface = ctx.nf_output_iface;
7350 }
7351 }
7352
7353 /* Translate tunnel metadata masks to udpif format if necessary. */
7354 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7355 if (ctx.wc->masks.tunnel.metadata.present.map) {
7356 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7357 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7358 sizeof(struct geneve_opt)];
7359
7360 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7361 &ctx.wc->masks.tunnel,
7362 upcall_tnl->metadata.opts.gnv,
7363 upcall_tnl->metadata.present.len,
7364 opts);
7365 memset(&ctx.wc->masks.tunnel.metadata, 0,
7366 sizeof ctx.wc->masks.tunnel.metadata);
7367 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7368 upcall_tnl->metadata.present.len);
7369 }
7370 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7371 ctx.wc->masks.tunnel.metadata.tab = NULL;
7372 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7373 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7374 /* If we didn't have options in UDPIF format and didn't have an existing
7375 * metadata table, then there were no options at all when we started
7376 * processing, and any wildcards we picked up came from action
7377 * generation. Without options on the incoming packet, such wildcards
7378 * aren't meaningful. To avoid them being misinterpreted, just clear
7379 * everything. */
7380 if (ctx.wc->masks.tunnel.metadata.present.map) {
7381 memset(&ctx.wc->masks.tunnel.metadata, 0,
7382 sizeof ctx.wc->masks.tunnel.metadata);
7383 } else {
7384 ctx.wc->masks.tunnel.metadata.tab = NULL;
7385 }
7386 }
7387
7388 xlate_wc_finish(&ctx);
7389
7390 exit:
7391 /* Reset the table to what it was when we came in. If we only fetched
7392 * it locally, then it has no meaning outside of flow translation. */
7393 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7394
7395 ofpbuf_uninit(&ctx.stack);
7396 ofpbuf_uninit(&ctx.action_set);
7397 ofpbuf_uninit(&ctx.frozen_actions);
7398 ofpbuf_uninit(&scratch_actions);
7399 ofpbuf_delete(ctx.encap_data);
7400
7401 /* Make sure we return a "drop flow" in case of an error. */
7402 if (ctx.error) {
7403 xout->slow = 0;
7404 if (xin->odp_actions) {
7405 ofpbuf_clear(xin->odp_actions);
7406 }
7407 }
7408 return ctx.error;
7409 }
7410
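/* Illustrative sketch only (the variable names below are placeholders): a
 * typical caller pairs xlate_actions() with xlate_in_init() and
 * xlate_out_uninit(), roughly as follows.  xlate_resume() below is a complete
 * in-tree example of this pattern.
 *
 *     struct xlate_in xin;
 *     struct xlate_out xout;
 *
 *     xlate_in_init(&xin, ofproto, tables_version, &flow, in_port, rule,
 *                   tcp_flags, packet, wc, odp_actions);
 *     enum xlate_error error = xlate_actions(&xin, &xout);
 *     ...use 'odp_actions' and 'xout.slow'...
 *     xlate_out_uninit(&xout);
 */
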
7411 enum ofperr
7412 xlate_resume(struct ofproto_dpif *ofproto,
7413 const struct ofputil_packet_in_private *pin,
7414 struct ofpbuf *odp_actions,
7415 enum slow_path_reason *slow)
7416 {
7417 struct dp_packet packet;
7418 dp_packet_use_const(&packet, pin->base.packet,
7419 pin->base.packet_len);
7420
7421 struct flow flow;
7422 flow_extract(&packet, &flow);
7423
7424 struct xlate_in xin;
7425 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7426 &flow, 0, NULL, ntohs(flow.tcp_flags),
7427 &packet, NULL, odp_actions);
7428
7429 struct ofpact_note noop;
7430 ofpact_init_NOTE(&noop);
7431 noop.length = 0;
7432
7433 bool any_actions = pin->actions_len > 0;
7434 struct frozen_state state = {
7435 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7436 .ofproto_uuid = pin->bridge,
7437 .stack = pin->stack,
7438 .stack_size = pin->stack_size,
7439 .mirrors = pin->mirrors,
7440 .conntracked = pin->conntracked,
7441 .xport_uuid = UUID_ZERO,
7442
7443 /* When there are no actions, xlate_actions() will search the flow
7444 * table. We don't want it to do that (we want it to resume), so
7445 * supply a no-op action if there aren't any.
7446 *
7447 * (We can't necessarily skip translation entirely when there are no
7448 * actions, because there might be some finishing-up to do at the end
7449 * of the pipeline, and we don't check for those conditions here.) */
7451 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7452 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7453
7454 .action_set = pin->action_set,
7455 .action_set_len = pin->action_set_len,
7456 };
7457 frozen_metadata_from_flow(&state.metadata,
7458 &pin->base.flow_metadata.flow);
7459 xin.frozen_state = &state;
7460
7461 struct xlate_out xout;
7462 enum xlate_error error = xlate_actions(&xin, &xout);
7463 *slow = xout.slow;
7464 xlate_out_uninit(&xout);
7465
7466 /* xlate_actions() can generate a number of errors, but only
7467 * XLATE_BRIDGE_NOT_FOUND really stands out as one that should be reported
7468 * over OpenFlow. The others could come up in packet-outs or regular flow
7469 * translation, and reporting them to the controller is unlikely to be
7470 * useful. */
7471 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7472 }
7473
7474 /* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel
7475 * type supports a notion of an OAM flag, sets the flag if 'oam' is true.
7476 * May modify 'packet'.
7477 * Returns 0 if successful, otherwise a positive errno value. */
7478 int
7479 xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7480 struct dp_packet *packet)
7481 {
7482 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7483 struct xport *xport;
7484 uint64_t ofpacts_stub[1024 / 8];
7485 struct ofpbuf ofpacts;
7486 struct flow flow;
7487
7488 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
7489 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
7490 flow_extract(packet, &flow);
7491 flow.in_port.ofp_port = OFPP_NONE;
7492
7493 xport = xport_lookup(xcfg, ofport);
7494 if (!xport) {
7495 return EINVAL;
7496 }
7497
7498 if (oam) {
7499 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7500 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7501 &flag, &flag);
7502 }
7503
7504 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
7505
7506 /* Actions here are not referring to anything versionable (flow tables or
7507 * groups) so we don't need to worry about the version here. */
7508 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7509 OVS_VERSION_MAX, &flow, NULL,
7510 ofpacts.data, ofpacts.size, packet);
7511 }
7512
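/* Updates the MAC learning table of 'ofproto' with 'dl_src' as seen on
 * 'in_port' in VLAN 'vlan'.  'is_grat_arp' indicates whether the update was
 * triggered by a gratuitous ARP. */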
7513 void
7514 xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7515 ofp_port_t in_port, struct eth_addr dl_src,
7516 int vlan, bool is_grat_arp)
7517 {
7518 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7519 struct xbridge *xbridge;
7520 struct xbundle *xbundle;
7521
7522 xbridge = xbridge_lookup(xcfg, ofproto);
7523 if (!xbridge) {
7524 return;
7525 }
7526
7527 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
7528 if (!xbundle) {
7529 return;
7530 }
7531
7532 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
7533 }
7534
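/* Records the datapath capabilities in 'support' for the bridge that
 * corresponds to 'ofproto', if that bridge is known to the current xlate
 * configuration, so that later translations can take them into account. */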
7535 void
7536 xlate_set_support(const struct ofproto_dpif *ofproto,
7537 const struct dpif_backer_support *support)
7538 {
7539 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7540 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
7541
7542 if (xbridge) {
7543 xbridge->support = *support;
7544 }
7545 }