1 /* Copyright (c) 2009-2017, 2019-2020 Nicira, Inc.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
14
15 #include <config.h>
16
17 #include "ofproto/ofproto-dpif-xlate.h"
18
19 #include <errno.h>
20 #include <sys/types.h>
21 #include <netinet/in.h>
22 #include <arpa/inet.h>
23 #include <net/if.h>
24 #include <sys/socket.h>
25
26 #include "bfd.h"
27 #include "bitmap.h"
28 #include "bond.h"
29 #include "bundle.h"
30 #include "byte-order.h"
31 #include "cfm.h"
32 #include "connmgr.h"
33 #include "coverage.h"
34 #include "csum.h"
35 #include "dp-packet.h"
36 #include "dpif.h"
37 #include "in-band.h"
38 #include "lacp.h"
39 #include "learn.h"
40 #include "mac-learning.h"
41 #include "mcast-snooping.h"
42 #include "multipath.h"
43 #include "netdev-vport.h"
44 #include "netlink.h"
45 #include "nx-match.h"
46 #include "odp-execute.h"
47 #include "ofproto/ofproto-dpif-ipfix.h"
48 #include "ofproto/ofproto-dpif-mirror.h"
49 #include "ofproto/ofproto-dpif-monitor.h"
50 #include "ofproto/ofproto-dpif-sflow.h"
51 #include "ofproto/ofproto-dpif-trace.h"
52 #include "ofproto/ofproto-dpif-xlate-cache.h"
53 #include "ofproto/ofproto-dpif.h"
54 #include "ofproto/ofproto-provider.h"
55 #include "openvswitch/dynamic-string.h"
56 #include "openvswitch/meta-flow.h"
57 #include "openvswitch/list.h"
58 #include "openvswitch/ofp-actions.h"
59 #include "openvswitch/ofp-ed-props.h"
60 #include "openvswitch/vlog.h"
61 #include "ovs-lldp.h"
62 #include "ovs-router.h"
63 #include "packets.h"
64 #include "tnl-neigh-cache.h"
65 #include "tnl-ports.h"
66 #include "tunnel.h"
67 #include "util.h"
68 #include "uuid.h"
69
70 COVERAGE_DEFINE(xlate_actions);
71 COVERAGE_DEFINE(xlate_actions_oversize);
72 COVERAGE_DEFINE(xlate_actions_too_many_output);
73
74 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
75
76 /* Maximum depth of flow table recursion (due to resubmit actions) in a
77 * flow translation.
78 *
79 * The goal of limiting the depth of resubmits is to ensure that flow
80 * translation eventually terminates. Only resubmits to the same table or an
81 * earlier table count against the maximum depth. This is because resubmits to
82 * strictly monotonically increasing table IDs will eventually terminate, since
83 * any OpenFlow switch has a finite number of tables. OpenFlow tables are most
84 * commonly traversed in numerically increasing order, so this limit has little
85 * effect on conventionally designed OpenFlow pipelines.
86 *
87 * Outputs to patch ports and to groups also count against the depth limit. */
88 #define MAX_DEPTH 64
89
90 /* Maximum number of resubmit actions in a flow translation, whether they are
91 * recursive or not. */
92 #define MAX_RESUBMITS (MAX_DEPTH * MAX_DEPTH)
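/* With MAX_DEPTH of 64 this works out to 64 * 64 = 4096 resubmits in a single
 * translation before it is aborted. */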
93
94 /* The structure holds an array of IP addresses assigned to a bridge and the
95 * number of elements in the array. These data are mutable and are evaluated
96 * when ARP or Neighbor Advertisement packets received on a native tunnel
97 * port are xlated. So 'ref_cnt' and RCU are used for synchronization. */
98 struct xbridge_addr {
99 struct in6_addr *addr; /* Array of IP addresses of xbridge. */
100 int n_addr; /* Number of IP addresses. */
101 struct ovs_refcount ref_cnt;
102 };
103
104 struct xbridge {
105 struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
106 struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
107
108 struct ovs_list xbundles; /* Owned xbundles. */
109 struct hmap xports; /* Indexed by ofp_port. */
110
111 char *name; /* Name used in log messages. */
112 struct dpif *dpif; /* Datapath interface. */
113 struct mac_learning *ml; /* Mac learning handle. */
114 struct mcast_snooping *ms; /* Multicast Snooping handle. */
115 struct mbridge *mbridge; /* Mirroring. */
116 struct dpif_sflow *sflow; /* SFlow handle, or null. */
117 struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
118 struct netflow *netflow; /* Netflow handle, or null. */
119 struct stp *stp; /* STP or null if disabled. */
120 struct rstp *rstp; /* RSTP or null if disabled. */
121
122 bool has_in_band; /* Bridge has in band control? */
123 bool forward_bpdu; /* Bridge forwards STP BPDUs? */
124
125 /* Datapath feature support. */
126 struct dpif_backer_support support;
127
128 struct xbridge_addr *addr;
129 };
130
131 struct xbundle {
132 struct hmap_node hmap_node; /* In global 'xbundles' map. */
133 struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
134
135 struct ovs_list list_node; /* In parent 'xbridges' list. */
136 struct xbridge *xbridge; /* Parent xbridge. */
137
138 struct ovs_list xports; /* Contains "struct xport"s. */
139
140 char *name; /* Name used in log messages. */
141 struct bond *bond; /* Nonnull iff more than one port. */
142 struct lacp *lacp; /* LACP handle or null. */
143
144 enum port_vlan_mode vlan_mode; /* VLAN mode. */
145 uint16_t qinq_ethtype; /* Ethertype of dot1q-tunnel interface
146 * either 0x8100 or 0x88a8. */
147 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
148 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
149 * NULL if all VLANs are trunked. */
150 unsigned long *cvlans; /* Bitmap of allowed customer vlans,
151 * NULL if all VLANs are allowed */
152 enum port_priority_tags_mode use_priority_tags;
153 /* Use 802.1p tag for frames in VLAN 0? */
154 bool floodable; /* No port has OFPUTIL_PC_NO_FLOOD set? */
155 bool protected; /* Protected port mode */
156 };
157
158 struct xport {
159 struct hmap_node hmap_node; /* Node in global 'xports' map. */
 160 struct ofport_dpif *ofport; /* Key in global 'xports' map. */
161
162 struct hmap_node ofp_node; /* Node in parent xbridge 'xports' map. */
163 ofp_port_t ofp_port; /* Key in parent xbridge 'xports' map. */
164
165 struct hmap_node uuid_node; /* Node in global 'xports_uuid' map. */
166 struct uuid uuid; /* Key in global 'xports_uuid' map. */
167
168 odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
169
170 struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
171 struct xbundle *xbundle; /* Parent xbundle or null. */
172
173 struct netdev *netdev; /* 'ofport''s netdev. */
174
175 struct xbridge *xbridge; /* Parent bridge. */
176 struct xport *peer; /* Patch port peer or null. */
177
178 enum ofputil_port_config config; /* OpenFlow port configuration. */
179 enum ofputil_port_state state; /* OpenFlow port state. */
180 int stp_port_no; /* STP port number or -1 if not in use. */
181 struct rstp_port *rstp_port; /* RSTP port or null. */
182
183 struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
184
185 bool may_enable; /* May be enabled in bonds. */
186 bool is_tunnel; /* Is a tunnel port. */
187 enum netdev_pt_mode pt_mode; /* packet_type handling. */
188
189 struct cfm *cfm; /* CFM handle or null. */
190 struct bfd *bfd; /* BFD handle or null. */
191 struct lldp *lldp; /* LLDP handle or null. */
192 };
193
194 struct xlate_ctx {
195 struct xlate_in *xin;
196 struct xlate_out *xout;
197
198 struct xlate_cfg *xcfg;
199 const struct xbridge *xbridge;
200
201 /* Flow at the last commit. */
202 struct flow base_flow;
203
204 /* Tunnel IP destination address as received. This is stored separately
205 * as the base_flow.tunnel is cleared on init to reflect the datapath
206 * behavior. Used to make sure not to send tunneled output to ourselves,
207 * which might lead to an infinite loop. This could happen easily
 208 * if a tunnel is marked as 'remote_ip=flow', and the flow does not
209 * actually set the tun_dst field. */
210 struct in6_addr orig_tunnel_ipv6_dst;
211
212 /* Stack for the push and pop actions. See comment above nx_stack_push()
213 * in nx-match.c for info on how the stack is stored. */
214 struct ofpbuf stack;
215
216 /* The rule that we are currently translating, or NULL. */
217 struct rule_dpif *rule;
218
219 /* Flow translation populates this with wildcards relevant in translation.
220 * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
221 * null, this is a pointer to a temporary buffer. */
222 struct flow_wildcards *wc;
223
224 /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
225 * this is the same pointer. When 'xin->odp_actions' is null, this points
226 * to a scratch ofpbuf. This allows code to add actions to
227 * 'ctx->odp_actions' without worrying about whether the caller really
228 * wants actions. */
229 struct ofpbuf *odp_actions;
230
231 /* Statistics maintained by xlate_table_action().
232 *
233 * These statistics limit the amount of work that a single flow
234 * translation can perform. The goal of the first of these, 'depth', is
235 * primarily to prevent translation from performing an infinite amount of
236 * work. It counts the current depth of nested "resubmit"s (and a few
237 * other activities); when a resubmit returns, it decreases. Resubmits to
238 * tables in strictly monotonically increasing order don't contribute to
239 * 'depth' because they cannot cause a flow translation to take an infinite
240 * amount of time (because the number of tables is finite). Translation
241 * aborts when 'depth' exceeds MAX_DEPTH.
242 *
243 * 'resubmits', on the other hand, prevents flow translation from
 244 * performing an extraordinarily large, though still finite, amount of work.
245 * It counts the total number of resubmits (and a few other activities)
246 * that have been executed. Returning from a resubmit does not affect this
247 * counter. Thus, this limits the amount of work that a particular
248 * translation can perform. Translation aborts when 'resubmits' exceeds
249 * MAX_RESUBMITS (which is much larger than MAX_DEPTH).
250 */
251 int depth; /* Current resubmit nesting depth. */
252 int resubmits; /* Total number of resubmits. */
253 bool in_action_set; /* Currently translating action_set, if true. */
254 bool in_packet_out; /* Currently translating a packet_out msg, if
255 * true. */
256 bool pending_encap; /* True when waiting to commit a pending
257 * encap action. */
258 bool pending_decap; /* True when waiting to commit a pending
259 * decap action. */
260 struct ofpbuf *encap_data; /* May contain a pointer to an ofpbuf with
261 * context for the datapath encap action.*/
262
263 uint8_t table_id; /* OpenFlow table ID where flow was found. */
264 ovs_be64 rule_cookie; /* Cookie of the rule being translated. */
265 uint32_t orig_skb_priority; /* Priority when packet arrived. */
266 uint32_t sflow_n_outputs; /* Number of output ports. */
267 odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
268 ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
269 bool exit; /* No further actions should be processed. */
270 mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
 271 int mirror_snaplen; /* Max size of a mirror packet in bytes. */
272
273 /* Freezing Translation
274 * ====================
275 *
276 * At some point during translation, the code may recognize the need to halt
277 * and checkpoint the translation in a way that it can be restarted again
278 * later. We call the checkpointing process "freezing" and the restarting
279 * process "thawing".
280 *
281 * The use cases for freezing are:
282 *
283 * - "Recirculation", where the translation process discovers that it
284 * doesn't have enough information to complete translation without
285 * actually executing the actions that have already been translated,
 286 * which provides the additional information needed. In these
287 * situations, translation freezes translation and assigns the frozen
288 * data a unique "recirculation ID", which it associates with the data
 289 * in a table in userspace (see ofproto-dpif-rid.h). It also adds an
 290 * OVS_ACTION_ATTR_RECIRC action specifying that ID to the datapath
 291 * actions. When a packet hits that action, the datapath looks its
 292 * flow up again using the ID. If there's a miss, it comes back to
 293 * userspace, which finds the recirculation table entry for the ID,
294 * thaws the associated frozen data, and continues translation from
295 * that point given the additional information that is now known.
296 *
297 * The archetypal example is MPLS. As MPLS is implemented in
298 * OpenFlow, the protocol that follows the last MPLS label becomes
299 * known only when that label is popped by an OpenFlow action. That
300 * means that Open vSwitch can't extract the headers beyond the MPLS
301 * labels until the pop action is executed. Thus, at that point
302 * translation uses the recirculation process to extract the headers
303 * beyond the MPLS labels.
304 *
305 * (OVS also uses OVS_ACTION_ATTR_RECIRC to implement hashing for
306 * output to bonds. OVS pre-populates all the datapath flows for bond
307 * output in the datapath, though, which means that the elaborate
308 * process of coming back to userspace for a second round of
309 * translation isn't needed, and so bonds don't follow the above
310 * process.)
311 *
312 * - "Continuation". A continuation is a way for an OpenFlow controller
313 * to interpose on a packet's traversal of the OpenFlow tables. When
314 * the translation process encounters a "controller" action with the
315 * "pause" flag, it freezes translation, serializes the frozen data,
316 * and sends it to an OpenFlow controller. The controller then
317 * examines and possibly modifies the frozen data and eventually sends
318 * it back to the switch, which thaws it and continues translation.
319 *
320 * The main problem of freezing translation is preserving state, so that
321 * when the translation is thawed later it resumes from where it left off,
322 * without disruption. In particular, actions must be preserved as follows:
323 *
324 * - If we're freezing because an action needed more information, the
325 * action that prompted it.
326 *
327 * - Any actions remaining to be translated within the current flow.
328 *
329 * - If translation was frozen within a NXAST_RESUBMIT, then any actions
330 * following the resubmit action. Resubmit actions can be nested, so
331 * this has to go all the way up the control stack.
332 *
333 * - The OpenFlow 1.1+ action set.
334 *
335 * State that actions and flow table lookups can depend on, such as the
336 * following, must also be preserved:
337 *
338 * - Metadata fields (input port, registers, OF1.1+ metadata, ...).
339 *
340 * - The stack used by NXAST_STACK_PUSH and NXAST_STACK_POP actions.
341 *
342 * - The table ID and cookie of the flow being translated at each level
343 * of the control stack, because these can become visible through
344 * OFPAT_CONTROLLER actions (and other ways).
345 *
346 * Translation allows for the control of this state preservation via these
347 * members. When a need to freeze translation is identified, the
348 * translation process:
349 *
350 * 1. Sets 'freezing' to true.
351 *
352 * 2. Sets 'exit' to true to tell later steps that we're exiting from the
353 * translation process.
354 *
355 * 3. Adds an OFPACT_UNROLL_XLATE action to 'frozen_actions', and points
356 * frozen_actions.header to the action to make it easy to find it later.
357 * This action holds the current table ID and cookie so that they can be
358 * restored during a post-recirculation upcall translation.
359 *
360 * 4. Adds the action that prompted recirculation and any actions following
361 * it within the same flow to 'frozen_actions', so that they can be
362 * executed during a post-recirculation upcall translation.
363 *
364 * 5. Returns.
365 *
366 * 6. The action that prompted recirculation might be nested in a stack of
367 * nested "resubmit"s that have actions remaining. Each of these notices
368 * that we're exiting and freezing and responds by adding more
369 * OFPACT_UNROLL_XLATE actions to 'frozen_actions', as necessary,
370 * followed by any actions that were yet unprocessed.
371 *
372 * If we're freezing because of recirculation, the caller generates a
373 * recirculation ID and associates all the state produced by this process
374 * with it. For post-recirculation upcall translation, the caller passes it
375 * back in for the new translation to execute. The process yielded a set of
376 * ofpacts that can be translated directly, so it is not much of a special
377 * case at that point.
378 */
379 bool freezing;
380 bool recirc_update_dp_hash; /* Generated recirculation will be preceded
381 * by datapath HASH action to get an updated
382 * dp_hash after recirculation. */
383 uint32_t dp_hash_alg;
384 uint32_t dp_hash_basis;
385 struct ofpbuf frozen_actions;
386 const struct ofpact_controller *pause;
387
388 /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
389 * This is a trigger for recirculation in cases where translating an action
390 * or looking up a flow requires access to the fields of the packet after
391 * the MPLS label stack that was originally present. */
392 bool was_mpls;
393
394 /* True if conntrack has been performed on this packet during processing
395 * on the current bridge. This is used to determine whether conntrack
396 * state from the datapath should be honored after thawing. */
397 bool conntracked;
398
399 /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
400 struct ofpact_nat *ct_nat_action;
401
402 /* OpenFlow 1.1+ action set.
403 *
404 * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
405 * When translation is otherwise complete, ofpacts_execute_action_set()
406 * converts it to a set of "struct ofpact"s that can be translated into
407 * datapath actions. */
408 bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
409 struct ofpbuf action_set; /* Action set. */
410
411 enum xlate_error error; /* Translation failed. */
412 };
413
414 /* Structure to track VLAN manipulation */
415 struct xvlan_single {
416 uint16_t tpid;
417 uint16_t vid;
418 uint16_t pcp;
419 };
420
421 struct xvlan {
422 struct xvlan_single v[FLOW_MAX_VLAN_HEADERS];
423 };
424
425 const char *xlate_strerror(enum xlate_error error)
426 {
427 switch (error) {
428 case XLATE_OK:
429 return "OK";
430 case XLATE_BRIDGE_NOT_FOUND:
431 return "Bridge not found";
432 case XLATE_RECURSION_TOO_DEEP:
433 return "Recursion too deep";
434 case XLATE_TOO_MANY_RESUBMITS:
435 return "Too many resubmits";
436 case XLATE_STACK_TOO_DEEP:
437 return "Stack too deep";
438 case XLATE_NO_RECIRCULATION_CONTEXT:
439 return "No recirculation context";
440 case XLATE_RECIRCULATION_CONFLICT:
441 return "Recirculation conflict";
442 case XLATE_TOO_MANY_MPLS_LABELS:
443 return "Too many MPLS labels";
444 case XLATE_INVALID_TUNNEL_METADATA:
445 return "Invalid tunnel metadata";
446 case XLATE_UNSUPPORTED_PACKET_TYPE:
447 return "Unsupported packet type";
448 case XLATE_CONGESTION_DROP:
449 return "Congestion Drop";
450 case XLATE_FORWARDING_DISABLED:
451 return "Forwarding is disabled";
452 case XLATE_MAX:
453 break;
454 }
455 return "Unknown error";
456 }
457
458 static void xlate_action_set(struct xlate_ctx *ctx);
459 static void xlate_commit_actions(struct xlate_ctx *ctx);
460
461 static void
462 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
463 struct xport *out_dev);
464
465 static void
466 ctx_trigger_freeze(struct xlate_ctx *ctx)
467 {
468 ctx->exit = true;
469 ctx->freezing = true;
470 }
471
472 static void
473 ctx_trigger_recirculate_with_hash(struct xlate_ctx *ctx, uint32_t type,
474 uint32_t basis)
475 {
476 ctx->exit = true;
477 ctx->freezing = true;
478 ctx->recirc_update_dp_hash = true;
479 ctx->dp_hash_alg = type;
480 ctx->dp_hash_basis = basis;
481 }
482
483 static bool
484 ctx_first_frozen_action(const struct xlate_ctx *ctx)
485 {
486 return !ctx->frozen_actions.size;
487 }
488
489 static void
490 ctx_cancel_freeze(struct xlate_ctx *ctx)
491 {
492 if (ctx->freezing) {
493 ctx->freezing = false;
494 ctx->recirc_update_dp_hash = false;
495 ofpbuf_clear(&ctx->frozen_actions);
496 ctx->frozen_actions.header = NULL;
497 ctx->pause = NULL;
498 }
499 }
500
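/* Illustrative sketch (not a new API): an action translator that discovers it
 * needs to recirculate typically calls one of the helpers above, e.g.
 *
 *     ctx_trigger_freeze(ctx);                             (plain freeze)
 *     ctx_trigger_recirculate_with_hash(ctx, alg, basis);  (freeze + dp_hash)
 *
 * and then returns; because 'ctx->exit' is now set, each enclosing resubmit
 * level appends its remaining actions to 'ctx->frozen_actions' on the way
 * out. ctx_cancel_freeze() discards all of that state if freezing turns out
 * to be unnecessary. */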
501 static void finish_freezing(struct xlate_ctx *ctx);
502
 503 /* A controller may use OFPP_NONE as the ingress port to indicate that
 504 * a packet did not arrive on a "real" port. 'ofpp_none_bundle' exists
 505 * for when an input bundle is needed for validation (e.g., mirroring or
 506 * OFPP_NORMAL processing). It is not connected to an 'ofproto', nor does
 507 * it have any 'port' structs, so care must be taken when dealing with it. */
508 static struct xbundle ofpp_none_bundle = {
509 .name = "OFPP_NONE",
510 .vlan_mode = PORT_VLAN_TRUNK
511 };
512
513 /* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
 514 * 'priority' (the datapath's term for QoS queue) to the DSCP bits with which
 515 * all traffic egressing the 'ofport' at that priority should be marked. */
516 struct skb_priority_to_dscp {
517 struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
518 uint32_t skb_priority; /* Priority of this queue (see struct flow). */
519
520 uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
521 };
522
523 /* Xlate config contains hash maps of all bridges, bundles and ports.
524 * Xcfgp contains the pointer to the current xlate configuration.
525 * When the main thread needs to change the configuration, it copies xcfgp to
526 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
527 * does not block handler and revalidator threads. */
528 struct xlate_cfg {
529 struct hmap xbridges;
530 struct hmap xbundles;
531 struct hmap xports;
532 struct hmap xports_uuid;
533 };
534 static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
535 static struct xlate_cfg *new_xcfg = NULL;
536
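/* A minimal reader-side sketch (this is the pattern the lookup functions
 * below already follow, e.g. xlate_lookup_ofproto_()):
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * No lock is taken; RCU keeps the old configuration alive until every thread
 * that might still reference it has quiesced (see xlate_txn_commit()). */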
537 typedef void xlate_actions_handler(const struct ofpact *, size_t ofpacts_len,
538 struct xlate_ctx *, bool, bool);
539 static bool may_receive(const struct xport *, struct xlate_ctx *);
540 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
541 struct xlate_ctx *, bool, bool);
542 static void clone_xlate_actions(const struct ofpact *, size_t ofpacts_len,
543 struct xlate_ctx *, bool, bool);
544 static void xlate_normal(struct xlate_ctx *);
545 static void xlate_normal_flood(struct xlate_ctx *ct,
546 struct xbundle *in_xbundle, struct xvlan *);
547 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
548 uint8_t table_id, bool may_packet_in,
549 bool honor_table_miss, bool with_ct_orig,
550 bool is_last_action, xlate_actions_handler *);
551
552 static bool input_vid_is_valid(const struct xlate_ctx *,
553 uint16_t vid, struct xbundle *);
554 static void xvlan_copy(struct xvlan *dst, const struct xvlan *src);
555 static void xvlan_pop(struct xvlan *src);
556 static void xvlan_push_uninit(struct xvlan *src);
557 static void xvlan_extract(const struct flow *, struct xvlan *);
558 static void xvlan_put(struct flow *, const struct xvlan *,
559 enum port_priority_tags_mode);
560 static void xvlan_input_translate(const struct xbundle *,
561 const struct xvlan *in,
562 struct xvlan *xvlan);
563 static void xvlan_output_translate(const struct xbundle *,
564 const struct xvlan *xvlan,
565 struct xvlan *out);
566 static void output_normal(struct xlate_ctx *, const struct xbundle *,
567 const struct xvlan *);
568
569 /* Optional bond recirculation parameter to compose_output_action(). */
570 struct xlate_bond_recirc {
571 uint32_t recirc_id; /* !0 Use recirculation instead of output. */
572 uint8_t hash_alg; /* !0 Compute hash for recirc before. */
573 uint32_t hash_basis; /* Compute hash for recirc before. */
574 };
575
576 static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
577 const struct xlate_bond_recirc *xr,
578 bool is_last_action, bool truncate);
579
580 static struct xbridge *xbridge_lookup(struct xlate_cfg *,
581 const struct ofproto_dpif *);
582 static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
583 const struct uuid *);
584 static struct xbundle *xbundle_lookup(struct xlate_cfg *,
585 const struct ofbundle *);
586 static struct xport *xport_lookup(struct xlate_cfg *,
587 const struct ofport_dpif *);
588 static struct xport *xport_lookup_by_uuid(struct xlate_cfg *,
589 const struct uuid *);
590 static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
591 static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
592 uint32_t skb_priority);
593 static void clear_skb_priorities(struct xport *);
594 static size_t count_skb_priorities(const struct xport *);
595 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
596 uint8_t *dscp);
597
598 static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
599 static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
600 static void xlate_xport_init(struct xlate_cfg *, struct xport *);
601 static void xlate_xbridge_set(struct xbridge *, struct dpif *,
602 const struct mac_learning *, struct stp *,
603 struct rstp *, const struct mcast_snooping *,
604 const struct mbridge *,
605 const struct dpif_sflow *,
606 const struct dpif_ipfix *,
607 const struct netflow *,
608 bool forward_bpdu, bool has_in_band,
609 const struct dpif_backer_support *,
610 const struct xbridge_addr *);
611 static void xlate_xbundle_set(struct xbundle *xbundle,
612 enum port_vlan_mode vlan_mode,
613 uint16_t qinq_ethtype, int vlan,
614 unsigned long *trunks, unsigned long *cvlans,
615 enum port_priority_tags_mode,
616 const struct bond *bond, const struct lacp *lacp,
617 bool floodable, bool protected);
618 static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
619 const struct netdev *netdev, const struct cfm *cfm,
620 const struct bfd *bfd, const struct lldp *lldp,
621 int stp_port_no, const struct rstp_port *rstp_port,
622 enum ofputil_port_config config,
623 enum ofputil_port_state state, bool is_tunnel,
624 bool may_enable);
625 static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
626 static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
627 static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
628 static void xlate_xbridge_copy(struct xbridge *);
629 static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
630 static void xlate_xport_copy(struct xbridge *, struct xbundle *,
631 struct xport *);
632 static void xlate_xcfg_free(struct xlate_cfg *);
633 \f
634 /* Tracing helpers. */
635
636 /* If tracing is enabled in 'ctx', creates a new trace node and appends it to
637 * the list of nodes maintained in ctx->xin. The new node has type 'type' and
638 * its text is created from 'format' by treating it as a printf format string.
639 * Returns the list of nodes embedded within the new trace node; ordinarily,
 640 * the caller can ignore this, but it is useful if the caller needs to nest
641 * more trace nodes within the new node.
642 *
643 * If tracing is not enabled, does nothing and returns NULL. */
644 static struct ovs_list * OVS_PRINTF_FORMAT(3, 4)
645 xlate_report(const struct xlate_ctx *ctx, enum oftrace_node_type type,
646 const char *format, ...)
647 {
648 struct ovs_list *subtrace = NULL;
649 if (OVS_UNLIKELY(ctx->xin->trace)) {
650 va_list args;
651 va_start(args, format);
652 char *text = xvasprintf(format, args);
653 subtrace = &oftrace_report(ctx->xin->trace, type, text)->subs;
654 va_end(args);
655 free(text);
656 }
657 return subtrace;
658 }
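/* For example (a sketch of the same save-and-restore pattern described for
 * xlate_report_table() below), a caller that wants subsequent reports to
 * appear nested underneath a new node can do:
 *
 *     struct ovs_list *old_trace = ctx->xin->trace;
 *     ctx->xin->trace = xlate_report(ctx, OFT_TABLE, "table %d", table_id);
 *     ...emit nested reports...
 *     ctx->xin->trace = old_trace;
 */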
659
660 /* This is like xlate_report() for errors that are serious enough that we
661 * should log them even if we are not tracing. */
662 static void OVS_PRINTF_FORMAT(2, 3)
663 xlate_report_error(const struct xlate_ctx *ctx, const char *format, ...)
664 {
665 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
666 if (!OVS_UNLIKELY(ctx->xin->trace)
667 && (!ctx->xin->packet || VLOG_DROP_WARN(&rl))) {
668 return;
669 }
670
671 struct ds s = DS_EMPTY_INITIALIZER;
672 va_list args;
673 va_start(args, format);
674 ds_put_format_valist(&s, format, args);
675 va_end(args);
676
677 if (ctx->xin->trace) {
678 oftrace_report(ctx->xin->trace, OFT_ERROR, ds_cstr(&s));
679 } else {
680 ds_put_format(&s, " on bridge %s while processing ",
681 ctx->xbridge->name);
682 flow_format(&s, &ctx->base_flow, NULL);
683 VLOG_WARN("%s", ds_cstr(&s));
684 }
685 ds_destroy(&s);
686 }
687
688 /* This is like xlate_report() for messages that should be logged
 689 * at the info level (even when not tracing). */
690 static void OVS_PRINTF_FORMAT(2, 3)
691 xlate_report_info(const struct xlate_ctx *ctx, const char *format, ...)
692 {
693 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
694 if (!OVS_UNLIKELY(ctx->xin->trace)
695 && (!ctx->xin->packet || VLOG_DROP_INFO(&rl))) {
696 return;
697 }
698
699 struct ds s = DS_EMPTY_INITIALIZER;
700 va_list args;
701 va_start(args, format);
702 ds_put_format_valist(&s, format, args);
703 va_end(args);
704
705 if (ctx->xin->trace) {
706 oftrace_report(ctx->xin->trace, OFT_WARN, ds_cstr(&s));
707 } else {
708 ds_put_format(&s, " on bridge %s while processing ",
709 ctx->xbridge->name);
710 flow_format(&s, &ctx->base_flow, NULL);
711 VLOG_INFO("%s", ds_cstr(&s));
712 }
713 ds_destroy(&s);
714 }
715
716 /* This is like xlate_report() for messages that should be logged at debug
717 * level (even if we are not tracing) because they can be valuable for
718 * debugging. */
719 static void OVS_PRINTF_FORMAT(3, 4)
720 xlate_report_debug(const struct xlate_ctx *ctx, enum oftrace_node_type type,
721 const char *format, ...)
722 {
723 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
724 if (!OVS_UNLIKELY(ctx->xin->trace)
725 && (!ctx->xin->packet || VLOG_DROP_DBG(&rl))) {
726 return;
727 }
728
729 struct ds s = DS_EMPTY_INITIALIZER;
730 va_list args;
731 va_start(args, format);
732 ds_put_format_valist(&s, format, args);
733 va_end(args);
734
735 if (ctx->xin->trace) {
736 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
737 } else {
738 VLOG_DBG("bridge %s: %s", ctx->xbridge->name, ds_cstr(&s));
739 }
740 ds_destroy(&s);
741 }
742
743 /* If tracing is enabled in 'ctx', appends a node of the given 'type' to the
744 * trace, whose text is 'title' followed by a formatted version of the
745 * 'ofpacts_len' OpenFlow actions in 'ofpacts'.
746 *
747 * If tracing is not enabled, does nothing. */
748 static void
749 xlate_report_actions(const struct xlate_ctx *ctx, enum oftrace_node_type type,
750 const char *title,
751 const struct ofpact *ofpacts, size_t ofpacts_len)
752 {
753 if (OVS_UNLIKELY(ctx->xin->trace)) {
754 struct ds s = DS_EMPTY_INITIALIZER;
755 ds_put_format(&s, "%s: ", title);
756 struct ofpact_format_params fp = { .s = &s };
757 ofpacts_format(ofpacts, ofpacts_len, &fp);
758 oftrace_report(ctx->xin->trace, type, ds_cstr(&s));
759 ds_destroy(&s);
760 }
761 }
762
763 /* If tracing is enabled in 'ctx', appends a node of type OFT_DETAIL to the
 764 * trace, whose text is a formatted version of the OpenFlow action set.
765 * 'verb' should be "was" or "is", depending on whether the action set reported
766 * is the new action set or the old one.
767 *
768 * If tracing is not enabled, does nothing. */
769 static void
770 xlate_report_action_set(const struct xlate_ctx *ctx, const char *verb)
771 {
772 if (OVS_UNLIKELY(ctx->xin->trace)) {
773 struct ofpbuf action_list;
774 ofpbuf_init(&action_list, 0);
775 ofpacts_execute_action_set(&action_list, &ctx->action_set);
776 if (action_list.size) {
777 struct ds s = DS_EMPTY_INITIALIZER;
778 struct ofpact_format_params fp = { .s = &s };
779 ofpacts_format(action_list.data, action_list.size, &fp);
780 xlate_report(ctx, OFT_DETAIL, "action set %s: %s",
781 verb, ds_cstr(&s));
782 ds_destroy(&s);
783 } else {
784 xlate_report(ctx, OFT_DETAIL, "action set %s empty", verb);
785 }
786 ofpbuf_uninit(&action_list);
787 }
788 }
789
790
791 /* If tracing is enabled in 'ctx', appends a node representing 'rule' (in
792 * OpenFlow table 'table_id') to the trace and makes this node the parent for
793 * future trace nodes. The caller should save ctx->xin->trace before calling
794 * this function, then after tracing all of the activities under the table,
795 * restore its previous value.
796 *
797 * If tracing is not enabled, does nothing. */
798 static void
799 xlate_report_table(const struct xlate_ctx *ctx, struct rule_dpif *rule,
800 uint8_t table_id)
801 {
802 if (OVS_LIKELY(!ctx->xin->trace)) {
803 return;
804 }
805
806 struct ds s = DS_EMPTY_INITIALIZER;
807 ds_put_format(&s, "%2d. ", table_id);
808 if (rule == ctx->xin->ofproto->miss_rule) {
809 ds_put_cstr(&s, "No match, and a \"packet-in\" is called for.");
810 } else if (rule == ctx->xin->ofproto->no_packet_in_rule) {
811 ds_put_cstr(&s, "No match.");
812 } else if (rule == ctx->xin->ofproto->drop_frags_rule) {
813 ds_put_cstr(&s, "Packets are IP fragments and "
814 "the fragment handling mode is \"drop\".");
815 } else {
816 minimatch_format(&rule->up.cr.match,
817 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
818 NULL, &s, OFP_DEFAULT_PRIORITY);
819 if (ds_last(&s) != ' ') {
820 ds_put_cstr(&s, ", ");
821 }
822 ds_put_format(&s, "priority %d", rule->up.cr.priority);
823 if (rule->up.flow_cookie) {
824 ds_put_format(&s, ", cookie %#"PRIx64,
825 ntohll(rule->up.flow_cookie));
826 }
827 }
828 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_TABLE,
829 ds_cstr(&s))->subs;
830 ds_destroy(&s);
831 }
832
833 /* If tracing is enabled in 'ctx', adds an OFT_DETAIL trace node to 'ctx'
834 * reporting the value of subfield 'sf'.
835 *
836 * If tracing is not enabled, does nothing. */
837 static void
838 xlate_report_subfield(const struct xlate_ctx *ctx,
839 const struct mf_subfield *sf)
840 {
841 if (OVS_UNLIKELY(ctx->xin->trace)) {
842 struct ds s = DS_EMPTY_INITIALIZER;
843 mf_format_subfield(sf, &s);
844 ds_put_cstr(&s, " is now ");
845
846 if (sf->ofs == 0 && sf->n_bits >= sf->field->n_bits) {
847 union mf_value value;
848 mf_get_value(sf->field, &ctx->xin->flow, &value);
849 mf_format(sf->field, &value, NULL, NULL, &s);
850 } else {
851 union mf_subvalue cst;
852 mf_read_subfield(sf, &ctx->xin->flow, &cst);
853 ds_put_hex(&s, &cst, sizeof cst);
854 }
855
856 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
857
858 ds_destroy(&s);
859 }
860 }
861 \f
862 static void
863 xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
864 {
865 ovs_list_init(&xbridge->xbundles);
866 hmap_init(&xbridge->xports);
867 hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
868 hash_pointer(xbridge->ofproto, 0));
869 }
870
871 static void
872 xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
873 {
874 ovs_list_init(&xbundle->xports);
875 ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
876 hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
877 hash_pointer(xbundle->ofbundle, 0));
878 }
879
880 static void
881 xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
882 {
883 hmap_init(&xport->skb_priorities);
884 hmap_insert(&xcfg->xports, &xport->hmap_node,
885 hash_pointer(xport->ofport, 0));
886 hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
887 hash_ofp_port(xport->ofp_port));
888 hmap_insert(&xcfg->xports_uuid, &xport->uuid_node,
889 uuid_hash(&xport->uuid));
890 }
891
892 static struct xbridge_addr *
893 xbridge_addr_create(struct xbridge *xbridge)
894 {
895 struct xbridge_addr *xbridge_addr = xbridge->addr;
896 struct in6_addr *addr = NULL, *mask = NULL;
897 struct netdev *dev;
898 int err, n_addr = 0;
899
900 err = netdev_open(xbridge->name, NULL, &dev);
901 if (!err) {
902 err = netdev_get_addr_list(dev, &addr, &mask, &n_addr);
903 if (!err) {
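            /* Only allocate a new xbridge_addr when the address list actually
             * differs from the current one; otherwise reuse the existing one
             * and free the freshly retrieved copy. */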
904 if (!xbridge->addr ||
905 n_addr != xbridge->addr->n_addr ||
906 (xbridge->addr->addr && memcmp(addr, xbridge->addr->addr,
907 sizeof(*addr) * n_addr))) {
908 xbridge_addr = xzalloc(sizeof *xbridge_addr);
909 xbridge_addr->addr = addr;
910 xbridge_addr->n_addr = n_addr;
911 ovs_refcount_init(&xbridge_addr->ref_cnt);
912 } else {
913 free(addr);
914 }
915 free(mask);
916 }
917 netdev_close(dev);
918 }
919
920 return xbridge_addr;
921 }
922
923 static struct xbridge_addr *
924 xbridge_addr_ref(const struct xbridge_addr *addr_)
925 {
926 struct xbridge_addr *addr = CONST_CAST(struct xbridge_addr *, addr_);
927 if (addr) {
928 ovs_refcount_ref(&addr->ref_cnt);
929 }
930 return addr;
931 }
932
933 static void
934 xbridge_addr_unref(struct xbridge_addr *addr)
935 {
936 if (addr && ovs_refcount_unref_relaxed(&addr->ref_cnt) == 1) {
937 free(addr->addr);
938 free(addr);
939 }
940 }
941
942 static void
943 xlate_xbridge_set(struct xbridge *xbridge,
944 struct dpif *dpif,
945 const struct mac_learning *ml, struct stp *stp,
946 struct rstp *rstp, const struct mcast_snooping *ms,
947 const struct mbridge *mbridge,
948 const struct dpif_sflow *sflow,
949 const struct dpif_ipfix *ipfix,
950 const struct netflow *netflow,
951 bool forward_bpdu, bool has_in_band,
952 const struct dpif_backer_support *support,
953 const struct xbridge_addr *addr)
954 {
955 if (xbridge->ml != ml) {
956 mac_learning_unref(xbridge->ml);
957 xbridge->ml = mac_learning_ref(ml);
958 }
959
960 if (xbridge->ms != ms) {
961 mcast_snooping_unref(xbridge->ms);
962 xbridge->ms = mcast_snooping_ref(ms);
963 }
964
965 if (xbridge->mbridge != mbridge) {
966 mbridge_unref(xbridge->mbridge);
967 xbridge->mbridge = mbridge_ref(mbridge);
968 }
969
970 if (xbridge->sflow != sflow) {
971 dpif_sflow_unref(xbridge->sflow);
972 xbridge->sflow = dpif_sflow_ref(sflow);
973 }
974
975 if (xbridge->ipfix != ipfix) {
976 dpif_ipfix_unref(xbridge->ipfix);
977 xbridge->ipfix = dpif_ipfix_ref(ipfix);
978 }
979
980 if (xbridge->stp != stp) {
981 stp_unref(xbridge->stp);
982 xbridge->stp = stp_ref(stp);
983 }
984
985 if (xbridge->rstp != rstp) {
986 rstp_unref(xbridge->rstp);
987 xbridge->rstp = rstp_ref(rstp);
988 }
989
990 if (xbridge->netflow != netflow) {
991 netflow_unref(xbridge->netflow);
992 xbridge->netflow = netflow_ref(netflow);
993 }
994
995 if (xbridge->addr != addr) {
996 xbridge_addr_unref(xbridge->addr);
997 xbridge->addr = xbridge_addr_ref(addr);
998 }
999
1000 xbridge->dpif = dpif;
1001 xbridge->forward_bpdu = forward_bpdu;
1002 xbridge->has_in_band = has_in_band;
1003 xbridge->support = *support;
1004 }
1005
1006 static void
1007 xlate_xbundle_set(struct xbundle *xbundle,
1008 enum port_vlan_mode vlan_mode, uint16_t qinq_ethtype,
1009 int vlan, unsigned long *trunks, unsigned long *cvlans,
1010 enum port_priority_tags_mode use_priority_tags,
1011 const struct bond *bond, const struct lacp *lacp,
1012 bool floodable, bool protected)
1013 {
1014 ovs_assert(xbundle->xbridge);
1015
1016 xbundle->vlan_mode = vlan_mode;
1017 xbundle->qinq_ethtype = qinq_ethtype;
1018 xbundle->vlan = vlan;
1019 xbundle->trunks = trunks;
1020 xbundle->cvlans = cvlans;
1021 xbundle->use_priority_tags = use_priority_tags;
1022 xbundle->floodable = floodable;
1023 xbundle->protected = protected;
1024
1025 if (xbundle->bond != bond) {
1026 bond_unref(xbundle->bond);
1027 xbundle->bond = bond_ref(bond);
1028 }
1029
1030 if (xbundle->lacp != lacp) {
1031 lacp_unref(xbundle->lacp);
1032 xbundle->lacp = lacp_ref(lacp);
1033 }
1034 }
1035
1036 static void
1037 xlate_xport_set(struct xport *xport, odp_port_t odp_port,
1038 const struct netdev *netdev, const struct cfm *cfm,
1039 const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
1040 const struct rstp_port* rstp_port,
1041 enum ofputil_port_config config, enum ofputil_port_state state,
1042 bool is_tunnel, bool may_enable)
1043 {
1044 xport->config = config;
1045 xport->state = state;
1046 xport->stp_port_no = stp_port_no;
1047 xport->is_tunnel = is_tunnel;
1048 xport->pt_mode = netdev_get_pt_mode(netdev);
1049 xport->may_enable = may_enable;
1050 xport->odp_port = odp_port;
1051
1052 if (xport->rstp_port != rstp_port) {
1053 rstp_port_unref(xport->rstp_port);
1054 xport->rstp_port = rstp_port_ref(rstp_port);
1055 }
1056
1057 if (xport->cfm != cfm) {
1058 cfm_unref(xport->cfm);
1059 xport->cfm = cfm_ref(cfm);
1060 }
1061
1062 if (xport->bfd != bfd) {
1063 bfd_unref(xport->bfd);
1064 xport->bfd = bfd_ref(bfd);
1065 }
1066
1067 if (xport->lldp != lldp) {
1068 lldp_unref(xport->lldp);
1069 xport->lldp = lldp_ref(lldp);
1070 }
1071
1072 if (xport->netdev != netdev) {
1073 netdev_close(xport->netdev);
1074 xport->netdev = netdev_ref(netdev);
1075 }
1076 }
1077
1078 static void
1079 xlate_xbridge_copy(struct xbridge *xbridge)
1080 {
1081 struct xbundle *xbundle;
1082 struct xport *xport;
1083 struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
1084 new_xbridge->ofproto = xbridge->ofproto;
1085 new_xbridge->name = xstrdup(xbridge->name);
1086 xlate_xbridge_init(new_xcfg, new_xbridge);
1087
1088 xlate_xbridge_set(new_xbridge,
1089 xbridge->dpif, xbridge->ml, xbridge->stp,
1090 xbridge->rstp, xbridge->ms, xbridge->mbridge,
1091 xbridge->sflow, xbridge->ipfix, xbridge->netflow,
1092 xbridge->forward_bpdu, xbridge->has_in_band,
1093 &xbridge->support, xbridge->addr);
1094 LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
1095 xlate_xbundle_copy(new_xbridge, xbundle);
1096 }
1097
 1098 /* Copy xports which are not part of an xbundle. */
1099 HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
1100 if (!xport->xbundle) {
1101 xlate_xport_copy(new_xbridge, NULL, xport);
1102 }
1103 }
1104 }
1105
1106 static void
1107 xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
1108 {
1109 struct xport *xport;
1110 struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
1111 new_xbundle->ofbundle = xbundle->ofbundle;
1112 new_xbundle->xbridge = xbridge;
1113 new_xbundle->name = xstrdup(xbundle->name);
1114 xlate_xbundle_init(new_xcfg, new_xbundle);
1115
1116 xlate_xbundle_set(new_xbundle, xbundle->vlan_mode, xbundle->qinq_ethtype,
1117 xbundle->vlan, xbundle->trunks, xbundle->cvlans,
1118 xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
1119 xbundle->floodable, xbundle->protected);
1120 LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
1121 xlate_xport_copy(xbridge, new_xbundle, xport);
1122 }
1123 }
1124
1125 static void
1126 xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
1127 struct xport *xport)
1128 {
1129 struct skb_priority_to_dscp *pdscp, *new_pdscp;
1130 struct xport *new_xport = xzalloc(sizeof *xport);
1131 new_xport->ofport = xport->ofport;
1132 new_xport->ofp_port = xport->ofp_port;
1133 new_xport->xbridge = xbridge;
1134 new_xport->uuid = xport->uuid;
1135 xlate_xport_init(new_xcfg, new_xport);
1136
1137 xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
1138 xport->bfd, xport->lldp, xport->stp_port_no,
1139 xport->rstp_port, xport->config, xport->state,
1140 xport->is_tunnel, xport->may_enable);
1141
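    /* Re-link patch port peers within the new configuration. The peer may
     * not have been copied yet; whichever copy happens second finds the
     * other and links both directions. */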
1142 if (xport->peer) {
1143 struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
1144 if (peer) {
1145 new_xport->peer = peer;
1146 new_xport->peer->peer = new_xport;
1147 }
1148 }
1149
1150 if (xbundle) {
1151 new_xport->xbundle = xbundle;
1152 ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
1153 }
1154
1155 HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
1156 new_pdscp = xmalloc(sizeof *pdscp);
1157 new_pdscp->skb_priority = pdscp->skb_priority;
1158 new_pdscp->dscp = pdscp->dscp;
1159 hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
1160 hash_int(new_pdscp->skb_priority, 0));
1161 }
1162 }
1163
1164 /* Sets the current xlate configuration to new_xcfg and frees the old xlate
1165 * configuration in xcfgp.
1166 *
1167 * This needs to be called after editing the xlate configuration.
1168 *
1169 * Functions that edit the new xlate configuration are
1170 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
1171 *
1172 * A sample workflow:
1173 *
1174 * xlate_txn_start();
1175 * ...
1176 * edit_xlate_configuration();
1177 * ...
1178 * xlate_txn_commit();
1179 *
1180 * The ovsrcu_synchronize() call here also ensures that the upcall threads
1181 * retain no references to anything in the previous configuration.
1182 */
1183 void
1184 xlate_txn_commit(void)
1185 {
1186 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1187
1188 ovsrcu_set(&xcfgp, new_xcfg);
1189 ovsrcu_synchronize();
1190 xlate_xcfg_free(xcfg);
1191 new_xcfg = NULL;
1192 }
1193
1194 /* Copies the current xlate configuration in xcfgp to new_xcfg.
1195 *
1196 * This needs to be called prior to editing the xlate configuration. */
1197 void
1198 xlate_txn_start(void)
1199 {
1200 struct xbridge *xbridge;
1201 struct xlate_cfg *xcfg;
1202
1203 ovs_assert(!new_xcfg);
1204
1205 new_xcfg = xmalloc(sizeof *new_xcfg);
1206 hmap_init(&new_xcfg->xbridges);
1207 hmap_init(&new_xcfg->xbundles);
1208 hmap_init(&new_xcfg->xports);
1209 hmap_init(&new_xcfg->xports_uuid);
1210
1211 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1212 if (!xcfg) {
1213 return;
1214 }
1215
1216 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1217 xlate_xbridge_copy(xbridge);
1218 }
1219 }
1220
1221
1222 static void
1223 xlate_xcfg_free(struct xlate_cfg *xcfg)
1224 {
1225 struct xbridge *xbridge, *next_xbridge;
1226
1227 if (!xcfg) {
1228 return;
1229 }
1230
1231 HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
1232 xlate_xbridge_remove(xcfg, xbridge);
1233 }
1234
1235 hmap_destroy(&xcfg->xbridges);
1236 hmap_destroy(&xcfg->xbundles);
1237 hmap_destroy(&xcfg->xports);
1238 hmap_destroy(&xcfg->xports_uuid);
1239 free(xcfg);
1240 }
1241
1242 void
1243 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
1244 struct dpif *dpif,
1245 const struct mac_learning *ml, struct stp *stp,
1246 struct rstp *rstp, const struct mcast_snooping *ms,
1247 const struct mbridge *mbridge,
1248 const struct dpif_sflow *sflow,
1249 const struct dpif_ipfix *ipfix,
1250 const struct netflow *netflow,
1251 bool forward_bpdu, bool has_in_band,
1252 const struct dpif_backer_support *support)
1253 {
1254 struct xbridge *xbridge;
1255 struct xbridge_addr *xbridge_addr, *old_addr;
1256
1257 ovs_assert(new_xcfg);
1258
1259 xbridge = xbridge_lookup(new_xcfg, ofproto);
1260 if (!xbridge) {
1261 xbridge = xzalloc(sizeof *xbridge);
1262 xbridge->ofproto = ofproto;
1263
1264 xlate_xbridge_init(new_xcfg, xbridge);
1265 }
1266
1267 free(xbridge->name);
1268 xbridge->name = xstrdup(name);
1269
1270 xbridge_addr = xbridge_addr_create(xbridge);
1271 old_addr = xbridge->addr;
1272
1273 xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
1274 netflow, forward_bpdu, has_in_band, support,
1275 xbridge_addr);
1276
1277 if (xbridge_addr != old_addr) {
1278 xbridge_addr_unref(xbridge_addr);
1279 }
1280 }
1281
1282 static void
1283 xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
1284 {
1285 struct xbundle *xbundle, *next_xbundle;
1286 struct xport *xport, *next_xport;
1287
1288 if (!xbridge) {
1289 return;
1290 }
1291
1292 HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
1293 xlate_xport_remove(xcfg, xport);
1294 }
1295
1296 LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
1297 xlate_xbundle_remove(xcfg, xbundle);
1298 }
1299
1300 hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
1301 mac_learning_unref(xbridge->ml);
1302 mcast_snooping_unref(xbridge->ms);
1303 mbridge_unref(xbridge->mbridge);
1304 dpif_sflow_unref(xbridge->sflow);
1305 dpif_ipfix_unref(xbridge->ipfix);
1306 netflow_unref(xbridge->netflow);
1307 stp_unref(xbridge->stp);
1308 rstp_unref(xbridge->rstp);
1309 xbridge_addr_unref(xbridge->addr);
1310 hmap_destroy(&xbridge->xports);
1311 free(xbridge->name);
1312 free(xbridge);
1313 }
1314
1315 void
1316 xlate_remove_ofproto(struct ofproto_dpif *ofproto)
1317 {
1318 struct xbridge *xbridge;
1319
1320 ovs_assert(new_xcfg);
1321
1322 xbridge = xbridge_lookup(new_xcfg, ofproto);
1323 xlate_xbridge_remove(new_xcfg, xbridge);
1324 }
1325
1326 void
1327 xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1328 const char *name, enum port_vlan_mode vlan_mode,
1329 uint16_t qinq_ethtype, int vlan,
1330 unsigned long *trunks, unsigned long *cvlans,
1331 enum port_priority_tags_mode use_priority_tags,
1332 const struct bond *bond, const struct lacp *lacp,
1333 bool floodable, bool protected)
1334 {
1335 struct xbundle *xbundle;
1336
1337 ovs_assert(new_xcfg);
1338
1339 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1340 if (!xbundle) {
1341 xbundle = xzalloc(sizeof *xbundle);
1342 xbundle->ofbundle = ofbundle;
1343 xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);
1344
1345 xlate_xbundle_init(new_xcfg, xbundle);
1346 }
1347
1348 free(xbundle->name);
1349 xbundle->name = xstrdup(name);
1350
1351 xlate_xbundle_set(xbundle, vlan_mode, qinq_ethtype, vlan, trunks, cvlans,
1352 use_priority_tags, bond, lacp, floodable, protected);
1353 }
1354
1355 static void
1356 xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
1357 {
1358 struct xport *xport;
1359
1360 if (!xbundle) {
1361 return;
1362 }
1363
1364 LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
1365 xport->xbundle = NULL;
1366 }
1367
1368 hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
1369 ovs_list_remove(&xbundle->list_node);
1370 bond_unref(xbundle->bond);
1371 lacp_unref(xbundle->lacp);
1372 free(xbundle->name);
1373 free(xbundle);
1374 }
1375
1376 void
1377 xlate_bundle_remove(struct ofbundle *ofbundle)
1378 {
1379 struct xbundle *xbundle;
1380
1381 ovs_assert(new_xcfg);
1382
1383 xbundle = xbundle_lookup(new_xcfg, ofbundle);
1384 xlate_xbundle_remove(new_xcfg, xbundle);
1385 }
1386
1387 void
1388 xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
1389 struct ofport_dpif *ofport, ofp_port_t ofp_port,
1390 odp_port_t odp_port, const struct netdev *netdev,
1391 const struct cfm *cfm, const struct bfd *bfd,
1392 const struct lldp *lldp, struct ofport_dpif *peer,
1393 int stp_port_no, const struct rstp_port *rstp_port,
1394 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
1395 enum ofputil_port_config config,
1396 enum ofputil_port_state state, bool is_tunnel,
1397 bool may_enable)
1398 {
1399 size_t i;
1400 struct xport *xport;
1401
1402 ovs_assert(new_xcfg);
1403
1404 xport = xport_lookup(new_xcfg, ofport);
1405 if (!xport) {
1406 xport = xzalloc(sizeof *xport);
1407 xport->ofport = ofport;
1408 xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
1409 xport->ofp_port = ofp_port;
1410 uuid_generate(&xport->uuid);
1411
1412 xlate_xport_init(new_xcfg, xport);
1413 }
1414
1415 ovs_assert(xport->ofp_port == ofp_port);
1416
1417 xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
1418 stp_port_no, rstp_port, config, state, is_tunnel,
1419 may_enable);
1420
1421 if (xport->peer) {
1422 xport->peer->peer = NULL;
1423 }
1424 xport->peer = xport_lookup(new_xcfg, peer);
1425 if (xport->peer) {
1426 xport->peer->peer = xport;
1427 }
1428
1429 if (xport->xbundle) {
1430 ovs_list_remove(&xport->bundle_node);
1431 }
1432 xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
1433 if (xport->xbundle) {
1434 ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
1435 }
1436
1437 clear_skb_priorities(xport);
1438 for (i = 0; i < n_qdscp; i++) {
1439 struct skb_priority_to_dscp *pdscp;
1440 uint32_t skb_priority;
1441
1442 if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
1443 &skb_priority)) {
1444 continue;
1445 }
1446
1447 pdscp = xmalloc(sizeof *pdscp);
1448 pdscp->skb_priority = skb_priority;
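            /* 'dscp' from the queue configuration is a 6-bit code point;
             * shift it into the high bits of the IP TOS byte, where DSCP
             * lives (the low two bits are ECN). */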
1449 pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
1450 hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
1451 hash_int(pdscp->skb_priority, 0));
1452 }
1453 }
1454
1455 static void
1456 xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
1457 {
1458 if (!xport) {
1459 return;
1460 }
1461
1462 if (xport->peer) {
1463 xport->peer->peer = NULL;
1464 xport->peer = NULL;
1465 }
1466
1467 if (xport->xbundle) {
1468 ovs_list_remove(&xport->bundle_node);
1469 }
1470
1471 clear_skb_priorities(xport);
1472 hmap_destroy(&xport->skb_priorities);
1473
1474 hmap_remove(&xcfg->xports, &xport->hmap_node);
1475 hmap_remove(&xcfg->xports_uuid, &xport->uuid_node);
1476 hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
1477
1478 netdev_close(xport->netdev);
1479 rstp_port_unref(xport->rstp_port);
1480 cfm_unref(xport->cfm);
1481 bfd_unref(xport->bfd);
1482 lldp_unref(xport->lldp);
1483 free(xport);
1484 }
1485
1486 void
1487 xlate_ofport_remove(struct ofport_dpif *ofport)
1488 {
1489 struct xport *xport;
1490
1491 ovs_assert(new_xcfg);
1492
1493 xport = xport_lookup(new_xcfg, ofport);
1494 if (xport) {
1495 tnl_neigh_flush(netdev_get_name(xport->netdev));
1496 }
1497 xlate_xport_remove(new_xcfg, xport);
1498 }
1499
1500 static struct ofproto_dpif *
1501 xlate_lookup_ofproto_(const struct dpif_backer *backer,
1502 const struct flow *flow,
1503 ofp_port_t *ofp_in_port, const struct xport **xportp,
1504 char **errorp)
1505 {
1506 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1507 const struct xport *xport;
1508
1509 /* If packet is recirculated, xport can be retrieved from frozen state. */
1510 if (flow->recirc_id) {
1511 const struct recirc_id_node *recirc_id_node;
1512
1513 recirc_id_node = recirc_id_node_find(flow->recirc_id);
1514
1515 if (OVS_UNLIKELY(!recirc_id_node)) {
1516 if (errorp) {
1517 *errorp = xasprintf("no recirculation data for recirc_id "
1518 "%"PRIu32, flow->recirc_id);
1519 }
1520 return NULL;
1521 }
1522
1523 ofp_port_t in_port = recirc_id_node->state.metadata.in_port;
1524 if (in_port != OFPP_NONE && in_port != OFPP_CONTROLLER) {
1525 struct uuid xport_uuid = recirc_id_node->state.xport_uuid;
1526 xport = xport_lookup_by_uuid(xcfg, &xport_uuid);
1527 if (xport && xport->xbridge && xport->xbridge->ofproto) {
1528 goto out;
1529 }
1530 } else {
1531 /* OFPP_NONE and OFPP_CONTROLLER are not real ports. They indicate
1532 * that the packet originated from the controller via an OpenFlow
1533 * "packet-out". The right thing to do is to find just the
1534 * ofproto. There is no xport, which is OK.
1535 *
1536 * OFPP_NONE can also indicate that a bond caused recirculation. */
1537 struct uuid uuid = recirc_id_node->state.ofproto_uuid;
1538 const struct xbridge *bridge = xbridge_lookup_by_uuid(xcfg, &uuid);
1539 if (bridge && bridge->ofproto) {
1540 if (errorp) {
1541 *errorp = NULL;
1542 }
1543 *xportp = NULL;
1544 if (ofp_in_port) {
1545 *ofp_in_port = in_port;
1546 }
1547 return bridge->ofproto;
1548 }
1549 }
1550 }
1551
1552 xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
1553 ? tnl_port_receive(flow)
1554 : odp_port_to_ofport(backer, flow->in_port.odp_port));
1555 if (OVS_UNLIKELY(!xport)) {
1556 if (errorp) {
1557 *errorp = (tnl_port_should_receive(flow)
1558 ? xstrdup("no OpenFlow tunnel port for this packet")
 1559 : xasprintf("no OpenFlow port for datapath "
 1560 "port %"PRIu32, flow->in_port.odp_port));
1561 }
1562 return NULL;
1563 }
1564
1565 out:
1566 if (errorp) {
1567 *errorp = NULL;
1568 }
1569 *xportp = xport;
1570 if (ofp_in_port) {
1571 *ofp_in_port = xport->ofp_port;
1572 }
1573 return xport->xbridge->ofproto;
1574 }
1575
 1576 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
1577 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
1578 struct ofproto_dpif *
1579 xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
1580 ofp_port_t *ofp_in_port, char **errorp)
1581 {
1582 const struct xport *xport;
1583
1584 return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, errorp);
1585 }
1586
 1587 /* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 1588 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with the
 1589 * OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 1590 * handles for those protocols if they're enabled. The caller may use the
 1591 * returned pointers until quiescing; for longer-term use, additional
 1592 * references must be taken.
1593 *
1594 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofproto.
1595 */
1596 int
1597 xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
1598 struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
1599 struct dpif_sflow **sflow, struct netflow **netflow,
1600 ofp_port_t *ofp_in_port)
1601 {
1602 struct ofproto_dpif *ofproto;
1603 const struct xport *xport;
1604
1605 ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport, NULL);
1606
1607 if (!ofproto) {
1608 return ENODEV;
1609 }
1610
1611 if (ofprotop) {
1612 *ofprotop = ofproto;
1613 }
1614
1615 if (ipfix) {
1616 *ipfix = xport ? xport->xbridge->ipfix : NULL;
1617 }
1618
1619 if (sflow) {
1620 *sflow = xport ? xport->xbridge->sflow : NULL;
1621 }
1622
1623 if (netflow) {
1624 *netflow = xport ? xport->xbridge->netflow : NULL;
1625 }
1626
1627 return 0;
1628 }
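/* Illustrative usage sketch (not part of the build): a caller that needs
 * just the ofproto and OpenFlow in_port can pass NULL for the protocol
 * handles.  Here 'backer' and 'flow' are assumed to come from an already
 * parsed upcall:
 *
 *     struct ofproto_dpif *ofproto;
 *     ofp_port_t ofp_in_port;
 *     int error = xlate_lookup(backer, flow, &ofproto,
 *                              NULL, NULL, NULL, &ofp_in_port);
 *
 * A nonzero 'error' (ENODEV) means no ofproto is associated with 'flow';
 * otherwise the results are valid until quiescing. */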
1629
1630 static struct xbridge *
1631 xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
1632 {
1633 struct hmap *xbridges;
1634 struct xbridge *xbridge;
1635
1636 if (!ofproto || !xcfg) {
1637 return NULL;
1638 }
1639
1640 xbridges = &xcfg->xbridges;
1641
1642 HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
1643 xbridges) {
1644 if (xbridge->ofproto == ofproto) {
1645 return xbridge;
1646 }
1647 }
1648 return NULL;
1649 }
1650
1651 static struct xbridge *
1652 xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1653 {
1654 struct xbridge *xbridge;
1655
1656 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
1657 if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
1658 return xbridge;
1659 }
1660 }
1661 return NULL;
1662 }
1663
1664 static struct xbundle *
1665 xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
1666 {
1667 struct hmap *xbundles;
1668 struct xbundle *xbundle;
1669
1670 if (!ofbundle || !xcfg) {
1671 return NULL;
1672 }
1673
1674 xbundles = &xcfg->xbundles;
1675
1676 HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
1677 xbundles) {
1678 if (xbundle->ofbundle == ofbundle) {
1679 return xbundle;
1680 }
1681 }
1682 return NULL;
1683 }
1684
1685 static struct xport *
1686 xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
1687 {
1688 struct hmap *xports;
1689 struct xport *xport;
1690
1691 if (!ofport || !xcfg) {
1692 return NULL;
1693 }
1694
1695 xports = &xcfg->xports;
1696
1697 HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
1698 xports) {
1699 if (xport->ofport == ofport) {
1700 return xport;
1701 }
1702 }
1703 return NULL;
1704 }
1705
1706 static struct xport *
1707 xport_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
1708 {
1709 struct hmap *xports;
1710 struct xport *xport;
1711
1712 if (uuid_is_zero(uuid) || !xcfg) {
1713 return NULL;
1714 }
1715
1716 xports = &xcfg->xports_uuid;
1717
1718 HMAP_FOR_EACH_IN_BUCKET (xport, uuid_node, uuid_hash(uuid), xports) {
1719 if (uuid_equals(&xport->uuid, uuid)) {
1720 return xport;
1721 }
1722 }
1723 return NULL;
1724 }
1725
1726 static struct stp_port *
1727 xport_get_stp_port(const struct xport *xport)
1728 {
1729 return xport->xbridge->stp && xport->stp_port_no != -1
1730 ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
1731 : NULL;
1732 }
1733
1734 static bool
1735 xport_stp_learn_state(const struct xport *xport)
1736 {
1737 struct stp_port *sp = xport_get_stp_port(xport);
1738 return sp
1739 ? stp_learn_in_state(stp_port_get_state(sp))
1740 : true;
1741 }
1742
1743 static bool
1744 xport_stp_forward_state(const struct xport *xport)
1745 {
1746 struct stp_port *sp = xport_get_stp_port(xport);
1747 return sp
1748 ? stp_forward_in_state(stp_port_get_state(sp))
1749 : true;
1750 }
1751
1752 static bool
1753 xport_stp_should_forward_bpdu(const struct xport *xport)
1754 {
1755 struct stp_port *sp = xport_get_stp_port(xport);
1756 return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
1757 }
1758
1759 /* Returns true if STP should process 'flow'. Sets fields in 'wc' that
1760 * were used to make the determination. */
1761 static bool
1762 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
1763 {
1764 /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
1765 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1766 return is_stp(flow);
1767 }
1768
1769 static void
1770 stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1771 {
1772 struct stp_port *sp = xport_get_stp_port(xport);
1773 struct dp_packet payload = *packet;
1774 struct eth_header *eth = dp_packet_data(&payload);
1775
1776 /* Sink packets on ports that have STP disabled when the bridge has
1777 * STP enabled. */
1778 if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
1779 return;
1780 }
1781
1782 /* Trim off padding on payload. */
1783 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1784 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1785 }
1786
1787 if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
1788 stp_received_bpdu(sp, dp_packet_data(&payload), dp_packet_size(&payload));
1789 }
1790 }
1791
1792 static enum rstp_state
1793 xport_get_rstp_port_state(const struct xport *xport)
1794 {
1795 return xport->rstp_port
1796 ? rstp_port_get_state(xport->rstp_port)
1797 : RSTP_DISABLED;
1798 }
1799
1800 static bool
1801 xport_rstp_learn_state(const struct xport *xport)
1802 {
1803 return xport->xbridge->rstp && xport->rstp_port
1804 ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
1805 : true;
1806 }
1807
1808 static bool
1809 xport_rstp_forward_state(const struct xport *xport)
1810 {
1811 return xport->xbridge->rstp && xport->rstp_port
1812 ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
1813 : true;
1814 }
1815
1816 static bool
1817 xport_rstp_should_manage_bpdu(const struct xport *xport)
1818 {
1819 return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
1820 }
1821
1822 static void
1823 rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
1824 {
1825 struct dp_packet payload = *packet;
1826 struct eth_header *eth = dp_packet_data(&payload);
1827
1828 /* Sink packets on ports that have no RSTP. */
1829 if (!xport->rstp_port) {
1830 return;
1831 }
1832
1833 /* Trim off padding on payload. */
1834 if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
1835 dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
1836 }
1837
1838 int len = ETH_HEADER_LEN + LLC_HEADER_LEN;
1839 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
1840 len += VLAN_HEADER_LEN;
1841 }
1842 if (dp_packet_try_pull(&payload, len)) {
1843 rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
1844 dp_packet_size(&payload));
1845 }
1846 }
1847
1848 static struct xport *
1849 get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1850 {
1851 struct xport *xport;
1852
1853 HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
1854 &xbridge->xports) {
1855 if (xport->ofp_port == ofp_port) {
1856 return xport;
1857 }
1858 }
1859 return NULL;
1860 }
1861
1862 static odp_port_t
1863 ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
1864 {
1865 const struct xport *xport = get_ofp_port(xbridge, ofp_port);
1866 return xport ? xport->odp_port : ODPP_NONE;
1867 }
1868
1869 static bool
1870 odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
1871 {
1872 struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1873 return xport && xport->may_enable;
1874 }
1875
1876 static struct ofputil_bucket *
1877 group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
1878 int depth);
1879
1880 static bool
1881 group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
1882 {
1883 struct group_dpif *group;
1884
1885 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
1886 ctx->xin->tables_version, false);
1887 if (group) {
1888 return group_first_live_bucket(ctx, group, depth) != NULL;
1889 }
1890
1891 return false;
1892 }
1893
1894 #define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
1895
1896 static bool
1897 bucket_is_alive(const struct xlate_ctx *ctx,
1898 struct ofputil_bucket *bucket, int depth)
1899 {
1900 if (depth >= MAX_LIVENESS_RECURSION) {
1901 xlate_report_error(ctx, "bucket chaining exceeded %d links",
1902 MAX_LIVENESS_RECURSION);
1903 return false;
1904 }
1905
1906 return (!ofputil_bucket_has_liveness(bucket)
1907 || (bucket->watch_port != OFPP_ANY
1908 && bucket->watch_port != OFPP_CONTROLLER
1909 && odp_port_is_alive(ctx, bucket->watch_port))
1910 || (bucket->watch_group != OFPG_ANY
1911 && group_is_alive(ctx, bucket->watch_group, depth + 1))
1912 || (bucket->watch_port == OFPP_CONTROLLER
1913 && ofproto_is_alive(&ctx->xbridge->ofproto->up)));
1914 }
1915
1916 static void
1917 xlate_report_bucket_not_live(const struct xlate_ctx *ctx,
1918 const struct ofputil_bucket *bucket)
1919 {
1920 if (OVS_UNLIKELY(ctx->xin->trace)) {
1921 struct ds s = DS_EMPTY_INITIALIZER;
1922 if (bucket->watch_port != OFPP_ANY) {
1923 ds_put_cstr(&s, "port ");
1924 ofputil_format_port(bucket->watch_port, NULL, &s);
1925 }
1926 if (bucket->watch_group != OFPG_ANY) {
1927 if (s.length) {
1928 ds_put_cstr(&s, " and ");
1929 }
1930 ds_put_format(&s, "group %"PRIu32, bucket->watch_group);
1931 }
1932
1933 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": not live due to %s",
1934 bucket->bucket_id, ds_cstr(&s));
1935
1936 ds_destroy(&s);
1937 }
1938 }
1939
1940 static struct ofputil_bucket *
1941 group_first_live_bucket(const struct xlate_ctx *ctx,
1942 const struct group_dpif *group, int depth)
1943 {
1944 struct ofputil_bucket *bucket;
1945 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1946 if (bucket_is_alive(ctx, bucket, depth)) {
1947 return bucket;
1948 }
1949 xlate_report_bucket_not_live(ctx, bucket);
1950 }
1951
1952 return NULL;
1953 }
1954
1955 static struct ofputil_bucket *
1956 group_best_live_bucket(const struct xlate_ctx *ctx,
1957 const struct group_dpif *group,
1958 uint32_t basis)
1959 {
1960 struct ofputil_bucket *best_bucket = NULL;
1961 uint32_t best_score = 0;
1962
1963 struct ofputil_bucket *bucket;
1964 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
1965 if (bucket_is_alive(ctx, bucket, 0)) {
1966 uint32_t score =
1967 (hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
1968 if (score >= best_score) {
1969 best_bucket = bucket;
1970 best_score = score;
1971 }
1972 xlate_report(ctx, OFT_DETAIL, "bucket %"PRIu32": score %"PRIu32,
1973 bucket->bucket_id, score);
1974 } else {
1975 xlate_report_bucket_not_live(ctx, bucket);
1976 }
1977 }
1978
1979 return best_bucket;
1980 }
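/* Worked example (illustrative figures, not from a trace): suppose two live
 * buckets with ids 1 and 2, weights 1 and 3, and that with the given 'basis'
 * the masked hashes come out as 0x4000 and 0x2000.  The weighted scores are
 * then 0x4000 * 1 = 16384 and 0x2000 * 3 = 24576, so bucket 2 is chosen.
 * Because the comparison is '>=', a later bucket also wins exact ties. */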
1981
1982 static bool
1983 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
1984 {
1985 return (bundle->vlan_mode != PORT_VLAN_ACCESS
1986 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
1987 }
1988
1989 static bool
1990 xbundle_allows_cvlan(const struct xbundle *bundle, uint16_t vlan)
1991 {
1992 return (!bundle->cvlans || bitmap_is_set(bundle->cvlans, vlan));
1993 }
1994
1995 static bool
1996 xbundle_includes_vlan(const struct xbundle *xbundle, const struct xvlan *xvlan)
1997 {
1998 switch (xbundle->vlan_mode) {
1999 case PORT_VLAN_ACCESS:
2000 return xvlan->v[0].vid == xbundle->vlan && xvlan->v[1].vid == 0;
2001
2002 case PORT_VLAN_TRUNK:
2003 case PORT_VLAN_NATIVE_UNTAGGED:
2004 case PORT_VLAN_NATIVE_TAGGED:
2005 return xbundle_trunks_vlan(xbundle, xvlan->v[0].vid);
2006
2007 case PORT_VLAN_DOT1Q_TUNNEL:
2008 return xvlan->v[0].vid == xbundle->vlan &&
2009 xbundle_allows_cvlan(xbundle, xvlan->v[1].vid);
2010
2011 default:
2012 OVS_NOT_REACHED();
2013 }
2014 }
2015
2016 static mirror_mask_t
2017 xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
2018 {
2019 return xbundle != &ofpp_none_bundle
2020 ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
2021 : 0;
2022 }
2023
2024 static mirror_mask_t
2025 xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
2026 {
2027 return xbundle != &ofpp_none_bundle
2028 ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
2029 : 0;
2030 }
2031
2032 static mirror_mask_t
2033 xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
2034 {
2035 return xbundle != &ofpp_none_bundle
2036 ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
2037 : 0;
2038 }
2039
2040 static struct xbundle *
2041 lookup_input_bundle__(const struct xbridge *xbridge,
2042 ofp_port_t in_port, struct xport **in_xportp)
2043 {
2044 struct xport *xport;
2045
2046 /* Find the port and bundle for the received packet. */
2047 xport = get_ofp_port(xbridge, in_port);
2048 if (in_xportp) {
2049 *in_xportp = xport;
2050 }
2051 if (xport && xport->xbundle) {
2052 return xport->xbundle;
2053 }
2054
2055 /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
2056 * which a controller may use as the ingress port for traffic that
2057 * it is sourcing. */
2058 if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
2059 return &ofpp_none_bundle;
2060 }
2061 return NULL;
2062 }
2063
2064 static struct xbundle *
2065 lookup_input_bundle(const struct xlate_ctx *ctx,
2066 ofp_port_t in_port, struct xport **in_xportp)
2067 {
2068 struct xbundle *xbundle = lookup_input_bundle__(ctx->xbridge,
2069 in_port, in_xportp);
2070 if (!xbundle) {
2071 /* Odd. A few possible reasons here:
2072 *
2073 * - We deleted a port but there are still a few packets queued up
2074 * from it.
2075 *
2076 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
2077 * we don't know about.
2078 *
2079 * - The ofproto client didn't configure the port as part of a bundle.
2080 * This is particularly likely to happen if a packet was received on
2081 * the port after it was created, but before the client had a chance
2082 * to configure its bundle.
2083 */
2084 xlate_report_error(ctx, "received packet on unknown port %"PRIu32,
2085 in_port);
2086 }
2087 return xbundle;
2088 }
2089
2090 /* Mirrors the packet represented by 'ctx' to appropriate mirror destinations,
2091 * given the packet is ingressing or egressing on 'xbundle', which has ingress
2092 * or egress (as appropriate) mirrors 'mirrors'. */
2093 static void
2094 mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
2095 mirror_mask_t mirrors)
2096 {
2097 struct xvlan in_xvlan;
2098 struct xvlan xvlan;
2099
2100 /* Figure out what VLAN the packet is in (because mirrors can select
2101 * packets on the basis of VLAN). */
2102 xvlan_extract(&ctx->xin->flow, &in_xvlan);
2103 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, xbundle)) {
2104 return;
2105 }
2106 xvlan_input_translate(xbundle, &in_xvlan, &xvlan);
2107
2108 const struct xbridge *xbridge = ctx->xbridge;
2109
2110 /* Don't mirror to destinations that we've already mirrored to. */
2111 mirrors &= ~ctx->mirrors;
2112 if (!mirrors) {
2113 return;
2114 }
2115
2116 /* 'mirrors' is a bit-mask of candidates for mirroring. Iterate through
2117 * the candidates, adding the ones that really should be mirrored to
2118 * 'used_mirrors', as long as some candidates remain. */
2119 mirror_mask_t used_mirrors = 0;
2120 while (mirrors) {
2121 const unsigned long *vlans;
2122 mirror_mask_t dup_mirrors;
2123 struct ofbundle *out;
2124 int out_vlan;
2125 int snaplen;
2126
2127 /* Get the details of the mirror represented by the rightmost 1-bit. */
2128 ovs_assert(mirror_get(xbridge->mbridge, raw_ctz(mirrors),
2129 &vlans, &dup_mirrors,
2130 &out, &snaplen, &out_vlan));
2131
2133 /* If this mirror selects on the basis of VLAN, and it does not select
2134 * 'vlan', then discard this mirror and go on to the next one. */
2135 if (vlans) {
2136 ctx->wc->masks.vlans[0].tci |= htons(VLAN_CFI | VLAN_VID_MASK);
2137 }
2138 if (vlans && !bitmap_is_set(vlans, xvlan.v[0].vid)) {
2139 mirrors = zero_rightmost_1bit(mirrors);
2140 continue;
2141 }
2142
2143 /* We sent a packet to this mirror. */
2144 used_mirrors |= rightmost_1bit(mirrors);
2145
2146 /* Record the mirror, and the mirrors that output to the same
2147 * destination, so that we don't mirror to them again. This must be
2148 * done now to ensure that output_normal(), below, doesn't recursively
2149 * output to the same mirrors. */
2150 ctx->mirrors |= dup_mirrors;
2151 ctx->mirror_snaplen = snaplen;
2152
2153 /* Send the packet to the mirror. */
2154 if (out) {
2155 struct xbundle *out_xbundle = xbundle_lookup(ctx->xcfg, out);
2156 if (out_xbundle) {
2157 output_normal(ctx, out_xbundle, &xvlan);
2158 }
2159 } else if (xvlan.v[0].vid != out_vlan
2160 && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
2161 struct xbundle *xb;
2162 uint16_t old_vid = xvlan.v[0].vid;
2163
2164 xvlan.v[0].vid = out_vlan;
2165 LIST_FOR_EACH (xb, list_node, &xbridge->xbundles) {
2166 if (xbundle_includes_vlan(xb, &xvlan)
2167 && !xbundle_mirror_out(xbridge, xb)) {
2168 output_normal(ctx, xb, &xvlan);
2169 }
2170 }
2171 xvlan.v[0].vid = old_vid;
2172 }
2173
2174 /* output_normal() could have recursively output (to different
2175 * mirrors), so make sure that we don't send duplicates. */
2176 mirrors &= ~ctx->mirrors;
2177 ctx->mirror_snaplen = 0;
2178 }
2179
2180 if (used_mirrors) {
2181 if (ctx->xin->resubmit_stats) {
2182 mirror_update_stats(xbridge->mbridge, used_mirrors,
2183 ctx->xin->resubmit_stats->n_packets,
2184 ctx->xin->resubmit_stats->n_bytes);
2185 }
2186 if (ctx->xin->xcache) {
2187 struct xc_entry *entry;
2188
2189 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
2190 entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
2191 entry->mirror.mirrors = used_mirrors;
2192 }
2193 }
2194 }
2195
2196 static void
2197 mirror_ingress_packet(struct xlate_ctx *ctx)
2198 {
2199 if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
2200 struct xbundle *xbundle = lookup_input_bundle(
2201 ctx, ctx->xin->flow.in_port.ofp_port, NULL);
2202 if (xbundle) {
2203 mirror_packet(ctx, xbundle,
2204 xbundle_mirror_src(ctx->xbridge, xbundle));
2205 }
2206 }
2207 }
2208
2209 /* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
2210 * If so, returns true. Otherwise, returns false.
2211 *
2212 * 'vid' should be the VID obtained from the 802.1Q header that was received as
2213 * part of a packet (specify 0 if there was no 802.1Q header), in the range
2214 * 0...4095. */
2215 static bool
2216 input_vid_is_valid(const struct xlate_ctx *ctx,
2217 uint16_t vid, struct xbundle *in_xbundle)
2218 {
2219 /* Allow any VID on the OFPP_NONE port. */
2220 if (in_xbundle == &ofpp_none_bundle) {
2221 return true;
2222 }
2223
2224 switch (in_xbundle->vlan_mode) {
2225 case PORT_VLAN_ACCESS:
2226 if (vid) {
2227 xlate_report_error(ctx, "dropping VLAN %"PRIu16" tagged "
2228 "packet received on port %s configured as VLAN "
2229 "%d access port", vid, in_xbundle->name,
2230 in_xbundle->vlan);
2231 return false;
2232 }
2233 return true;
2234
2235 case PORT_VLAN_NATIVE_UNTAGGED:
2236 case PORT_VLAN_NATIVE_TAGGED:
2237 if (!vid) {
2238 /* Port must always carry its native VLAN. */
2239 return true;
2240 }
2241 /* Fall through. */
2242 case PORT_VLAN_TRUNK:
2243 if (!xbundle_trunks_vlan(in_xbundle, vid)) {
2244 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet "
2245 "received on port %s not configured for "
2246 "trunking VLAN %"PRIu16,
2247 vid, in_xbundle->name, vid);
2248 return false;
2249 }
2250 return true;
2251
2252 case PORT_VLAN_DOT1Q_TUNNEL:
2253 if (!xbundle_allows_cvlan(in_xbundle, vid)) {
2254 xlate_report_error(ctx, "dropping VLAN %"PRIu16" packet received "
2255 "on dot1q-tunnel port %s that excludes this "
2256 "VLAN", vid, in_xbundle->name);
2257 return false;
2258 }
2259 return true;
2260
2261 default:
2262 OVS_NOT_REACHED();
2263 }
2264
2265 }
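/* Worked example (illustrative): an access port on VLAN 20 admits only
 * untagged frames (vid == 0); a trunk carrying VLANs 10 and 20 admits vid 10
 * or 20 but rejects vid 30; a native-tagged or native-untagged port always
 * admits untagged frames in addition to the VLANs it trunks; and a
 * dot1q-tunnel port admits only the customer VLANs in its 'cvlans' bitmap,
 * or any VID if no bitmap is configured. */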
2266
2267 static void
2268 xvlan_copy(struct xvlan *dst, const struct xvlan *src)
2269 {
2270 *dst = *src;
2271 }
2272
2273 static void
2274 xvlan_pop(struct xvlan *src)
2275 {
2276 memmove(&src->v[0], &src->v[1], sizeof(src->v) - sizeof(src->v[0]));
2277 memset(&src->v[FLOW_MAX_VLAN_HEADERS - 1], 0,
2278 sizeof(src->v[FLOW_MAX_VLAN_HEADERS - 1]));
2279 }
2280
2281 static void
2282 xvlan_push_uninit(struct xvlan *src)
2283 {
2284 memmove(&src->v[1], &src->v[0], sizeof(src->v) - sizeof(src->v[0]));
2285 memset(&src->v[0], 0, sizeof(src->v[0]));
2286 }
2287
2288 /* Extract VLAN information (headers) from 'flow'. */
2289 static void
2290 xvlan_extract(const struct flow *flow, struct xvlan *xvlan)
2291 {
2292 int i;
2293 memset(xvlan, 0, sizeof(*xvlan));
2294 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2295 if (!eth_type_vlan(flow->vlans[i].tpid) ||
2296 !(flow->vlans[i].tci & htons(VLAN_CFI))) {
2297 break;
2298 }
2299 xvlan->v[i].tpid = ntohs(flow->vlans[i].tpid);
2300 xvlan->v[i].vid = vlan_tci_to_vid(flow->vlans[i].tci);
2301 xvlan->v[i].pcp = ntohs(flow->vlans[i].tci) & VLAN_PCP_MASK;
2302 }
2303 }
2304
2305 /* Put VLAN information (headers) back into 'flow'. */
2306 static void
2307 xvlan_put(struct flow *flow, const struct xvlan *xvlan,
2308 enum port_priority_tags_mode use_priority_tags)
2309 {
2310 ovs_be16 tci;
2311 int i;
2312 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
2313 tci = htons(xvlan->v[i].vid | (xvlan->v[i].pcp & VLAN_PCP_MASK));
2314 if (tci || ((use_priority_tags == PORT_PRIORITY_TAGS_ALWAYS) &&
2315 xvlan->v[i].tpid)) {
2316 tci |= htons(VLAN_CFI);
2317 flow->vlans[i].tpid = xvlan->v[i].tpid ?
2318 htons(xvlan->v[i].tpid) :
2319 htons(ETH_TYPE_VLAN_8021Q);
2320 }
2321 flow->vlans[i].tci = tci;
2322 }
2323 }
2324
2325 /* Given 'in_xvlan', extracted from the input 802.1Q headers received as part
2326 * of a packet, and 'in_xbundle', the bundle on which the packet was received,
2327 * returns the VLANs of the packet during bridge internal processing. */
2328 static void
2329 xvlan_input_translate(const struct xbundle *in_xbundle,
2330 const struct xvlan *in_xvlan, struct xvlan *xvlan)
2331 {
2332
2333 switch (in_xbundle->vlan_mode) {
2334 case PORT_VLAN_ACCESS:
2335 memset(xvlan, 0, sizeof(*xvlan));
2336 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2337 ETH_TYPE_VLAN_8021Q;
2338 xvlan->v[0].vid = in_xbundle->vlan;
2339 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2340 break;
2341
2342 case PORT_VLAN_TRUNK:
2343 xvlan_copy(xvlan, in_xvlan);
2344 break;
2345
2346 case PORT_VLAN_NATIVE_UNTAGGED:
2347 case PORT_VLAN_NATIVE_TAGGED:
2348 xvlan_copy(xvlan, in_xvlan);
2349 if (!in_xvlan->v[0].vid) {
2350 xvlan->v[0].tpid = in_xvlan->v[0].tpid ? in_xvlan->v[0].tpid :
2351 ETH_TYPE_VLAN_8021Q;
2352 xvlan->v[0].vid = in_xbundle->vlan;
2353 xvlan->v[0].pcp = in_xvlan->v[0].pcp;
2354 }
2355 break;
2356
2357 case PORT_VLAN_DOT1Q_TUNNEL:
2358 xvlan_copy(xvlan, in_xvlan);
2359 xvlan_push_uninit(xvlan);
2360 xvlan->v[0].tpid = in_xbundle->qinq_ethtype;
2361 xvlan->v[0].vid = in_xbundle->vlan;
2362 xvlan->v[0].pcp = 0;
2363 break;
2364
2365 default:
2366 OVS_NOT_REACHED();
2367 }
2368 }
2369
2370 /* Given 'xvlan', the VLANs of a packet during internal processing, and
2371 * 'out_xbundle', a bundle on which the packet is to be output, returns the
2372 * VLANs that should be included in output packet. */
2373 static void
2374 xvlan_output_translate(const struct xbundle *out_xbundle,
2375 const struct xvlan *xvlan, struct xvlan *out_xvlan)
2376 {
2377 switch (out_xbundle->vlan_mode) {
2378 case PORT_VLAN_ACCESS:
2379 memset(out_xvlan, 0, sizeof(*out_xvlan));
2380 break;
2381
2382 case PORT_VLAN_TRUNK:
2383 case PORT_VLAN_NATIVE_TAGGED:
2384 xvlan_copy(out_xvlan, xvlan);
2385 break;
2386
2387 case PORT_VLAN_NATIVE_UNTAGGED:
2388 xvlan_copy(out_xvlan, xvlan);
2389 if (xvlan->v[0].vid == out_xbundle->vlan) {
2390 xvlan_pop(out_xvlan);
2391 }
2392 break;
2393
2394 case PORT_VLAN_DOT1Q_TUNNEL:
2395 xvlan_copy(out_xvlan, xvlan);
2396 xvlan_pop(out_xvlan);
2397 break;
2398
2399 default:
2400 OVS_NOT_REACHED();
2401 }
2402 }
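/* Worked example (illustrative): if 'xvlan' carries VID 10 and 'out_xbundle'
 * is a native-untagged bundle whose native VLAN is 10, the outer tag is
 * popped and the packet leaves untagged; on a plain trunk the tags are kept
 * as-is; on an access port all tags are stripped; and on a dot1q-tunnel port
 * the service tag pushed on ingress is popped, exposing the customer tag
 * again. */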
2403
2404 /* If output xbundle is dot1q-tunnel, set mask bits of cvlan */
2405 static void
2406 check_and_set_cvlan_mask(struct flow_wildcards *wc,
2407 const struct xbundle *xbundle)
2408 {
2409 if (xbundle->vlan_mode == PORT_VLAN_DOT1Q_TUNNEL && xbundle->cvlans) {
2410 wc->masks.vlans[1].tci = htons(0xffff);
2411 }
2412 }
2413
2414 static void
2415 output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
2416 const struct xvlan *xvlan)
2417 {
2418 uint16_t vid;
2419 union flow_vlan_hdr old_vlans[FLOW_MAX_VLAN_HEADERS];
2420 struct xport *xport;
2421 struct xlate_bond_recirc xr;
2422 bool use_recirc = false;
2423 struct xvlan out_xvlan;
2424
2425 check_and_set_cvlan_mask(ctx->wc, out_xbundle);
2426
2427 xvlan_output_translate(out_xbundle, xvlan, &out_xvlan);
2428 if (out_xbundle->use_priority_tags) {
2429 out_xvlan.v[0].pcp = ntohs(ctx->xin->flow.vlans[0].tci) &
2430 VLAN_PCP_MASK;
2431 }
2432 vid = out_xvlan.v[0].vid;
2433 if (ovs_list_is_empty(&out_xbundle->xports)) {
2434 /* Partially configured bundle with no members. Drop the packet. */
2435 return;
2436 } else if (!out_xbundle->bond) {
2437 xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
2438 bundle_node);
2439 } else {
2440 struct flow_wildcards *wc = ctx->wc;
2441 struct ofport_dpif *ofport;
2442
2443 if (ctx->xbridge->support.odp.recirc) {
2444 /* In case recirculation is not actually in use, 'xr.recirc_id'
2445 * will be set to '0', since a valid 'recirc_id' can
2446 * not be zero. */
2447 bond_update_post_recirc_rules(out_xbundle->bond,
2448 &xr.recirc_id,
2449 &xr.hash_basis);
2450 if (xr.recirc_id) {
2451 /* Use recirculation instead of output. */
2452 use_recirc = true;
2453 xr.hash_alg = OVS_HASH_ALG_L4;
2454 /* Recirculation does not require unmasking hash fields. */
2455 wc = NULL;
2456 }
2457 }
2458
2459 ofport = bond_choose_output_member(out_xbundle->bond,
2460 &ctx->xin->flow, wc, vid);
2461 xport = xport_lookup(ctx->xcfg, ofport);
2462
2463 if (!xport) {
2464 /* No member interfaces enabled, so drop packet. */
2465 return;
2466 }
2467
2468 /* If use_recirc is set, the main thread will handle stats
2469 * accounting for this bond. */
2470 if (!use_recirc) {
2471 if (ctx->xin->resubmit_stats) {
2472 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
2473 ctx->xin->resubmit_stats->n_bytes);
2474 }
2475 if (ctx->xin->xcache) {
2476 struct xc_entry *entry;
2477 struct flow *flow;
2478
2479 flow = &ctx->xin->flow;
2480 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
2481 entry->bond.bond = bond_ref(out_xbundle->bond);
2482 entry->bond.flow = xmemdup(flow, sizeof *flow);
2483 entry->bond.vid = vid;
2484 }
2485 }
2486 }
2487
2488 memcpy(&old_vlans, &ctx->xin->flow.vlans, sizeof(old_vlans));
2489 xvlan_put(&ctx->xin->flow, &out_xvlan, out_xbundle->use_priority_tags);
2490
2491 compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL,
2492 false, false);
2493 memcpy(&ctx->xin->flow.vlans, &old_vlans, sizeof(old_vlans));
2494 }
2495
2496 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
2497 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
2498 * indicate this; newer upstream kernels use gratuitous ARP requests. */
2499 static bool
2500 is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
2501 {
2502 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
2503 return false;
2504 }
2505
2506 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2507 if (!eth_addr_is_broadcast(flow->dl_dst)) {
2508 return false;
2509 }
2510
2511 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
2512 if (flow->nw_proto == ARP_OP_REPLY) {
2513 return true;
2514 } else if (flow->nw_proto == ARP_OP_REQUEST) {
2515 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
2516 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2517
2518 return flow->nw_src == flow->nw_dst;
2519 } else {
2520 return false;
2521 }
2522 }
2523
2524 /* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
2525 * dropped. Returns true if they may be forwarded, false if they should be
2526 * dropped.
2527 *
2528 * 'in_port' must be the xport that corresponds to flow->in_port.
2529 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
2530 *
2531 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
2532 * determined by xvlan_input_translate(). It must be a valid VLAN for
2533 * 'in_port', as checked by input_vid_is_valid().
2534 */
2538 static bool
2539 is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
2540 uint16_t vlan)
2541 {
2542 struct xbundle *in_xbundle = in_port->xbundle;
2543 const struct xbridge *xbridge = ctx->xbridge;
2544 struct flow *flow = &ctx->xin->flow;
2545
2546 /* Drop frames for reserved multicast addresses
2547 * only if forward_bpdu option is absent. */
2548 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
2549 xlate_report(ctx, OFT_DETAIL,
2550 "packet has reserved destination MAC, dropping");
2551 return false;
2552 }
2553
2554 if (in_xbundle->bond) {
2555 struct mac_entry *mac;
2556
2557 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
2558 flow->dl_dst)) {
2559 case BV_ACCEPT:
2560 break;
2561
2562 case BV_DROP:
2563 xlate_report(ctx, OFT_DETAIL,
2564 "bonding refused admissibility, dropping");
2565 return false;
2566
2567 case BV_DROP_IF_MOVED:
2568 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
2569 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
2570 if (mac
2571 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
2572 && (!is_gratuitous_arp(flow, ctx->wc)
2573 || mac_entry_is_grat_arp_locked(mac))) {
2574 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2575 xlate_report(ctx, OFT_DETAIL,
2576 "SLB bond thinks this packet looped back, "
2577 "dropping");
2578 return false;
2579 }
2580 ovs_rwlock_unlock(&xbridge->ml->rwlock);
2581 break;
2582 }
2583 }
2584
2585 return true;
2586 }
2587
2588 static bool
2589 update_learning_table__(const struct xbridge *xbridge,
2590 struct xbundle *in_xbundle, struct eth_addr dl_src,
2591 int vlan, bool is_grat_arp)
2592 {
2593 return (in_xbundle == &ofpp_none_bundle
2594 || !mac_learning_update(xbridge->ml, dl_src, vlan,
2595 is_grat_arp,
2596 in_xbundle->bond != NULL,
2597 in_xbundle->ofbundle));
2598 }
2599
2600 static void
2601 update_learning_table(const struct xlate_ctx *ctx,
2602 struct xbundle *in_xbundle, struct eth_addr dl_src,
2603 int vlan, bool is_grat_arp)
2604 {
2605 if (!update_learning_table__(ctx->xbridge, in_xbundle, dl_src, vlan,
2606 is_grat_arp)) {
2607 xlate_report_debug(ctx, OFT_DETAIL, "learned that "ETH_ADDR_FMT" is "
2608 "on port %s in VLAN %d",
2609 ETH_ADDR_ARGS(dl_src), in_xbundle->name, vlan);
2610 }
2611 }
2612
2613 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2614 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
2615 static void
2616 update_mcast_snooping_table4__(const struct xlate_ctx *ctx,
2617 const struct flow *flow,
2618 struct mcast_snooping *ms, int vlan,
2619 struct xbundle *in_xbundle,
2620 const struct dp_packet *packet)
2621 OVS_REQ_WRLOCK(ms->rwlock)
2622 {
2623 const struct igmp_header *igmp;
2624 int count;
2625 size_t offset;
2626 ovs_be32 ip4 = flow->igmp_group_ip4;
2627
2628 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2629 igmp = dp_packet_at(packet, offset, IGMP_HEADER_LEN);
2630 if (!igmp || csum(igmp, dp_packet_l4_size(packet)) != 0) {
2631 xlate_report_debug(ctx, OFT_DETAIL,
2632 "multicast snooping received bad IGMP "
2633 "checksum on port %s in VLAN %d",
2634 in_xbundle->name, vlan);
2635 return;
2636 }
2637
2638 switch (ntohs(flow->tp_src)) {
2639 case IGMP_HOST_MEMBERSHIP_REPORT:
2640 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2641 if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2642 xlate_report_debug(ctx, OFT_DETAIL,
2643 "multicast snooping learned that "
2644 IP_FMT" is on port %s in VLAN %d",
2645 IP_ARGS(ip4), in_xbundle->name, vlan);
2646 }
2647 break;
2648 case IGMP_HOST_LEAVE_MESSAGE:
2649 if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
2650 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping "
2651 IP_FMT" left port %s in VLAN %d",
2652 IP_ARGS(ip4), in_xbundle->name, vlan);
2653 }
2654 break;
2655 case IGMP_HOST_MEMBERSHIP_QUERY:
2656 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
2657 in_xbundle->ofbundle)) {
2658 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query "
2659 "from "IP_FMT" is on port %s in VLAN %d",
2660 IP_ARGS(flow->nw_src), in_xbundle->name, vlan);
2661 }
2662 break;
2663 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2664 count = mcast_snooping_add_report(ms, packet, vlan,
2665 in_xbundle->ofbundle);
2666 if (count) {
2667 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2668 "%d addresses on port %s in VLAN %d",
2669 count, in_xbundle->name, vlan);
2670 }
2671 break;
2672 }
2673 }
2674
2675 static void
2676 update_mcast_snooping_table6__(const struct xlate_ctx *ctx,
2677 const struct flow *flow,
2678 struct mcast_snooping *ms, int vlan,
2679 struct xbundle *in_xbundle,
2680 const struct dp_packet *packet)
2681 OVS_REQ_WRLOCK(ms->rwlock)
2682 {
2683 const struct mld_header *mld;
2684 int count;
2685 size_t offset;
2686
2687 offset = (char *) dp_packet_l4(packet) - (char *) dp_packet_data(packet);
2688 mld = dp_packet_at(packet, offset, MLD_HEADER_LEN);
2689
2690 if (!mld ||
2691 packet_csum_upperlayer6(dp_packet_l3(packet),
2692 mld, IPPROTO_ICMPV6,
2693 dp_packet_l4_size(packet)) != 0) {
2694 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping received "
2695 "bad MLD checksum on port %s in VLAN %d",
2696 in_xbundle->name, vlan);
2697 return;
2698 }
2699
2700 switch (ntohs(flow->tp_src)) {
2701 case MLD_QUERY:
2702 if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
2703 && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
2704 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping query on "
2705 "port %s in VLAN %d", in_xbundle->name, vlan);
2706 }
2707 break;
2708 case MLD_REPORT:
2709 case MLD_DONE:
2710 case MLD2_REPORT:
2711 count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
2712 if (count) {
2713 xlate_report_debug(ctx, OFT_DETAIL, "multicast snooping processed "
2714 "%d addresses on port %s in VLAN %d",
2715 count, in_xbundle->name, vlan);
2716 }
2717 break;
2718 }
2719 }
2720
2721 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
2722 * was received on 'in_xbundle' in 'vlan'. */
2723 static void
2724 update_mcast_snooping_table(const struct xlate_ctx *ctx,
2725 const struct flow *flow, int vlan,
2726 struct xbundle *in_xbundle,
2727 const struct dp_packet *packet)
2728 {
2729 struct mcast_snooping *ms = ctx->xbridge->ms;
2730 struct xbundle *mcast_xbundle;
2731 struct mcast_port_bundle *fport;
2732
2733 /* Don't learn the OFPP_NONE port. */
2734 if (in_xbundle == &ofpp_none_bundle) {
2735 return;
2736 }
2737
2738 /* Don't learn from flood ports */
2739 mcast_xbundle = NULL;
2740 ovs_rwlock_wrlock(&ms->rwlock);
2741 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2742 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2743 if (mcast_xbundle == in_xbundle) {
2744 break;
2745 }
2746 }
2747
2748 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
2749 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2750 update_mcast_snooping_table4__(ctx, flow, ms, vlan,
2751 in_xbundle, packet);
2752 } else {
2753 update_mcast_snooping_table6__(ctx, flow, ms, vlan,
2754 in_xbundle, packet);
2755 }
2756 }
2757 ovs_rwlock_unlock(&ms->rwlock);
2758 }
2759 \f
2760 /* A list of multicast output ports.
2761 *
2762 * We accumulate output ports and then do all the outputs afterward. It would
2763 * be more natural to do the outputs one at a time as we discover the need for
2764 * each one, but this can cause a deadlock because we need to take the
2765 * mcast_snooping's rwlock for reading to iterate through the port lists and
2766 * doing an output, if it goes to a patch port, can eventually come back to the
2767 * same mcast_snooping and attempt to take the write lock (see
2768 * https://github.com/openvswitch/ovs-issues/issues/153). */
2769 struct mcast_output {
2770 /* Discrete ports. */
2771 struct xbundle **xbundles;
2772 size_t n, allocated;
2773
2774 /* If set, flood to all ports. */
2775 bool flood;
2776 };
2777 #define MCAST_OUTPUT_INIT { NULL, 0, 0, false }
2778
2779 /* Add 'mcast_bundle' to 'out'. */
2780 static void
2781 mcast_output_add(struct mcast_output *out, struct xbundle *mcast_xbundle)
2782 {
2783 if (out->n >= out->allocated) {
2784 out->xbundles = x2nrealloc(out->xbundles, &out->allocated,
2785 sizeof *out->xbundles);
2786 }
2787 out->xbundles[out->n++] = mcast_xbundle;
2788 }
2789
2790 /* Outputs the packet in 'ctx' to all of the output ports in 'out', given input
2791 * bundle 'in_xbundle' and the current 'xvlan'. */
2792 static void
2793 mcast_output_finish(struct xlate_ctx *ctx, struct mcast_output *out,
2794 struct xbundle *in_xbundle, struct xvlan *xvlan)
2795 {
2796 if (out->flood) {
2797 xlate_normal_flood(ctx, in_xbundle, xvlan);
2798 } else {
2799 for (size_t i = 0; i < out->n; i++) {
2800 output_normal(ctx, out->xbundles[i], xvlan);
2801 }
2802 }
2803
2804 free(out->xbundles);
2805 }
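/* Illustrative usage sketch (this mirrors the callers in xlate_normal()):
 * destinations are collected while 'ms->rwlock' is held for reading, and only
 * after the lock is released does mcast_output_finish() perform the outputs,
 * so an output through a patch port cannot re-enter the snooping table and
 * deadlock:
 *
 *     struct mcast_output out = MCAST_OUTPUT_INIT;
 *
 *     ovs_rwlock_rdlock(&ms->rwlock);
 *     xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan, &out);
 *     ovs_rwlock_unlock(&ms->rwlock);
 *
 *     mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
 */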
2806
2807 /* Send the packet to ports on which the multicast group has been learned. */
2808 static void
2809 xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2810 struct mcast_snooping *ms OVS_UNUSED,
2811 struct mcast_group *grp,
2812 struct xbundle *in_xbundle,
2813 struct mcast_output *out)
2814 OVS_REQ_RDLOCK(ms->rwlock)
2815 {
2816 struct mcast_group_bundle *b;
2817 struct xbundle *mcast_xbundle;
2818
2819 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2820 mcast_xbundle = xbundle_lookup(ctx->xcfg, b->port);
2821 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2822 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast group port");
2823 mcast_output_add(out, mcast_xbundle);
2824 } else if (!mcast_xbundle) {
2825 xlate_report(ctx, OFT_WARN,
2826 "mcast group port is unknown, dropping");
2827 } else {
2828 xlate_report(ctx, OFT_DETAIL,
2829 "mcast group port is input port, dropping");
2830 }
2831 }
2832 }
2833
2834 /* send the packet to ports connected to multicast routers */
2835 static void
2836 xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2837 struct mcast_snooping *ms,
2838 struct xbundle *in_xbundle,
2839 const struct xvlan *xvlan,
2840 struct mcast_output *out)
2841 OVS_REQ_RDLOCK(ms->rwlock)
2842 {
2843 struct mcast_mrouter_bundle *mrouter;
2844 struct xbundle *mcast_xbundle;
2845
2846 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2847 mcast_xbundle = xbundle_lookup(ctx->xcfg, mrouter->port);
2848 if (mcast_xbundle && mcast_xbundle != in_xbundle
2849 && mrouter->vlan == xvlan->v[0].vid) {
2850 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast router port");
2851 mcast_output_add(out, mcast_xbundle);
2852 } else if (!mcast_xbundle) {
2853 xlate_report(ctx, OFT_WARN,
2854 "mcast router port is unknown, dropping");
2855 } else if (mrouter->vlan != xvlan->v[0].vid) {
2856 xlate_report(ctx, OFT_DETAIL,
2857 "mcast router is on another vlan, dropping");
2858 } else {
2859 xlate_report(ctx, OFT_DETAIL,
2860 "mcast router port is input port, dropping");
2861 }
2862 }
2863 }
2864
2865 /* send the packet to ports flagged to be flooded */
2866 static void
2867 xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2868 struct mcast_snooping *ms,
2869 struct xbundle *in_xbundle,
2870 struct mcast_output *out)
2871 OVS_REQ_RDLOCK(ms->rwlock)
2872 {
2873 struct mcast_port_bundle *fport;
2874 struct xbundle *mcast_xbundle;
2875
2876 LIST_FOR_EACH(fport, node, &ms->fport_list) {
2877 mcast_xbundle = xbundle_lookup(ctx->xcfg, fport->port);
2878 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2879 xlate_report(ctx, OFT_DETAIL, "forwarding to mcast flood port");
2880 mcast_output_add(out, mcast_xbundle);
2881 } else if (!mcast_xbundle) {
2882 xlate_report(ctx, OFT_WARN,
2883 "mcast flood port is unknown, dropping");
2884 } else {
2885 xlate_report(ctx, OFT_DETAIL,
2886 "mcast flood port is input port, dropping");
2887 }
2888 }
2889 }
2890
2891 /* forward the Reports to configured ports */
2892 static void
2893 xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2894 struct mcast_snooping *ms,
2895 struct xbundle *in_xbundle,
2896 struct mcast_output *out)
2897 OVS_REQ_RDLOCK(ms->rwlock)
2898 {
2899 struct mcast_port_bundle *rport;
2900 struct xbundle *mcast_xbundle;
2901
2902 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2903 mcast_xbundle = xbundle_lookup(ctx->xcfg, rport->port);
2904 if (mcast_xbundle
2905 && mcast_xbundle != in_xbundle
2906 && mcast_xbundle->ofbundle != in_xbundle->ofbundle) {
2907 xlate_report(ctx, OFT_DETAIL,
2908 "forwarding report to mcast flagged port");
2909 mcast_output_add(out, mcast_xbundle);
2910 } else if (!mcast_xbundle) {
2911 xlate_report(ctx, OFT_WARN,
2912 "mcast port is unknown, dropping the report");
2913 } else {
2914 xlate_report(ctx, OFT_DETAIL,
2915 "mcast port is input port, dropping the Report");
2916 }
2917 }
2918 }
2919
2920 static void
2921 xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2922 struct xvlan *xvlan)
2923 {
2924 struct xbundle *xbundle;
2925
2926 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2927 if (xbundle != in_xbundle
2928 && xbundle->ofbundle != in_xbundle->ofbundle
2929 && xbundle_includes_vlan(xbundle, xvlan)
2930 && xbundle->floodable
2931 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2932 output_normal(ctx, xbundle, xvlan);
2933 }
2934 }
2935 ctx->nf_output_iface = NF_OUT_FLOOD;
2936 }
2937
2938 static bool
2939 is_ip_local_multicast(const struct flow *flow, struct flow_wildcards *wc)
2940 {
2941 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2942 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
2943 return ip_is_local_multicast(flow->nw_dst);
2944 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2945 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
2946 return ipv6_is_all_hosts(&flow->ipv6_dst);
2947 } else {
2948 return false;
2949 }
2950 }
2951
2952 static void
2953 xlate_normal(struct xlate_ctx *ctx)
2954 {
2955 struct flow_wildcards *wc = ctx->wc;
2956 struct flow *flow = &ctx->xin->flow;
2957 struct xbundle *in_xbundle;
2958 struct xport *in_port;
2959 struct mac_entry *mac;
2960 void *mac_port;
2961 struct xvlan in_xvlan;
2962 struct xvlan xvlan;
2963 uint16_t vlan;
2964
2965 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2966 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2967 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
2968
2969 in_xbundle = lookup_input_bundle(ctx, flow->in_port.ofp_port, &in_port);
2970 if (!in_xbundle) {
2971 xlate_report(ctx, OFT_WARN, "no input bundle, dropping");
2972 return;
2973 }
2974
2975 /* Drop malformed frames. */
2976 if (eth_type_vlan(flow->dl_type) &&
2977 !(flow->vlans[0].tci & htons(VLAN_CFI))) {
2978 if (ctx->xin->packet != NULL) {
2979 xlate_report_error(ctx, "dropping packet with partial "
2980 "VLAN tag received on port %s",
2981 in_xbundle->name);
2982 }
2983 xlate_report(ctx, OFT_WARN, "partial VLAN tag, dropping");
2984 return;
2985 }
2986
2987 /* Drop frames on bundles reserved for mirroring. */
2988 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
2989 if (ctx->xin->packet != NULL) {
2990 xlate_report_error(ctx, "dropping packet received on port %s, "
2991 "which is reserved exclusively for mirroring",
2992 in_xbundle->name);
2993 }
2994 xlate_report(ctx, OFT_WARN,
2995 "input port is mirror output port, dropping");
2996 return;
2997 }
2998
2999 /* Check VLAN. */
3000 xvlan_extract(flow, &in_xvlan);
3001 if (!input_vid_is_valid(ctx, in_xvlan.v[0].vid, in_xbundle)) {
3002 xlate_report(ctx, OFT_WARN,
3003 "disallowed VLAN VID for this input port, dropping");
3004 return;
3005 }
3006 xvlan_input_translate(in_xbundle, &in_xvlan, &xvlan);
3007 vlan = xvlan.v[0].vid;
3008
3009 /* Check other admissibility requirements. */
3010 if (in_port && !is_admissible(ctx, in_port, vlan)) {
3011 return;
3012 }
3013
3014 /* Learn source MAC. */
3015 bool is_grat_arp = is_gratuitous_arp(flow, wc);
3016 if (ctx->xin->allow_side_effects
3017 && flow->packet_type == htonl(PT_ETH)
3018 && in_port->pt_mode != NETDEV_PT_LEGACY_L3
3019 ) {
3020 update_learning_table(ctx, in_xbundle, flow->dl_src, vlan,
3021 is_grat_arp);
3022 }
3023 if (ctx->xin->xcache && in_xbundle != &ofpp_none_bundle) {
3024 struct xc_entry *entry;
3025
3026 /* Save just enough info to update mac learning table later. */
3027 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
3028 entry->normal.ofproto = ctx->xbridge->ofproto;
3029 entry->normal.in_port = flow->in_port.ofp_port;
3030 entry->normal.dl_src = flow->dl_src;
3031 entry->normal.vlan = vlan;
3032 entry->normal.is_gratuitous_arp = is_grat_arp;
3033 }
3034
3035 /* Determine output bundle. */
3036 if (mcast_snooping_enabled(ctx->xbridge->ms)
3037 && !eth_addr_is_broadcast(flow->dl_dst)
3038 && eth_addr_is_multicast(flow->dl_dst)
3039 && is_ip_any(flow)) {
3040 struct mcast_snooping *ms = ctx->xbridge->ms;
3041 struct mcast_group *grp = NULL;
3042
3043 if (is_igmp(flow, wc)) {
3044 /*
3045 * IGMP packets need to take the slow path so that they are
3046 * processed for mdb updates. Otherwise, group entries could
3047 * expire even though hosts keep sending reports.
3048 */
3049 ctx->xout->slow |= SLOW_ACTION;
3050
3051 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
3052 if (mcast_snooping_is_membership(flow->tp_src) ||
3053 mcast_snooping_is_query(flow->tp_src)) {
3054 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
3055 update_mcast_snooping_table(ctx, flow, vlan,
3056 in_xbundle, ctx->xin->packet);
3057 }
3058 }
3059
3060 if (mcast_snooping_is_membership(flow->tp_src)) {
3061 struct mcast_output out = MCAST_OUTPUT_INIT;
3062
3063 ovs_rwlock_rdlock(&ms->rwlock);
3064 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3065 &out);
3066 /* RFC4541: section 2.1.1, item 1: A snooping switch should
3067 * forward IGMP Membership Reports only to those ports where
3068 * multicast routers are attached. Alternatively stated: a
3069 * snooping switch should not forward IGMP Membership Reports
3070 * to ports on which only hosts are attached.
3071 * An administrative control may be provided to override this
3072 * restriction, allowing the report messages to be flooded to
3073 * other ports. */
3074 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
3075 ovs_rwlock_unlock(&ms->rwlock);
3076
3077 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3078 } else {
3079 xlate_report(ctx, OFT_DETAIL, "multicast traffic, flooding");
3080 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3081 }
3082 return;
3083 } else if (is_mld(flow, wc)) {
3084 ctx->xout->slow |= SLOW_ACTION;
3085 if (ctx->xin->allow_side_effects && ctx->xin->packet) {
3086 update_mcast_snooping_table(ctx, flow, vlan,
3087 in_xbundle, ctx->xin->packet);
3088 }
3089 if (is_mld_report(flow, wc)) {
3090 struct mcast_output out = MCAST_OUTPUT_INIT;
3091
3092 ovs_rwlock_rdlock(&ms->rwlock);
3093 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3094 &out);
3095 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, &out);
3096 ovs_rwlock_unlock(&ms->rwlock);
3097
3098 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3099 } else {
3100 xlate_report(ctx, OFT_DETAIL, "MLD query, flooding");
3101 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3102 }
3103 return;
3104 } else {
3105 if (is_ip_local_multicast(flow, wc)) {
3106 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
3107 * address in the 224.0.0.x range which are not IGMP must
3108 * be forwarded on all ports */
3109 xlate_report(ctx, OFT_DETAIL,
3110 "RFC4541: section 2.1.2, item 2, flooding");
3111 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3112 return;
3113 }
3114 }
3115
3116 /* forwarding to group base ports */
3117 struct mcast_output out = MCAST_OUTPUT_INIT;
3118
3119 ovs_rwlock_rdlock(&ms->rwlock);
3120 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3121 grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
3122 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
3123 grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
3124 }
3125 if (grp) {
3126 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, &out);
3127 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
3128 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3129 &out);
3130 } else {
3131 if (mcast_snooping_flood_unreg(ms)) {
3132 xlate_report(ctx, OFT_DETAIL,
3133 "unregistered multicast, flooding");
3134 out.flood = true;
3135 } else {
3136 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, &xvlan,
3137 &out);
3138 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, &out);
3139 }
3140 }
3141 ovs_rwlock_unlock(&ms->rwlock);
3142
3143 mcast_output_finish(ctx, &out, in_xbundle, &xvlan);
3144 } else {
3145 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
3146 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
3147 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
3148 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
3149
3150 if (mac_port) {
3151 struct xbundle *mac_xbundle = xbundle_lookup(ctx->xcfg, mac_port);
3152
3153 if (mac_xbundle && xbundle_mirror_out(ctx->xbridge, mac_xbundle)) {
3154 xlate_report(ctx, OFT_WARN,
3155 "learned port is a mirror port, dropping");
3156 return;
3157 }
3158
3159 if (mac_xbundle
3160 && mac_xbundle != in_xbundle
3161 && mac_xbundle->ofbundle != in_xbundle->ofbundle) {
3162 xlate_report(ctx, OFT_DETAIL, "forwarding to learned port");
3163 output_normal(ctx, mac_xbundle, &xvlan);
3164 } else if (!mac_xbundle) {
3165 xlate_report(ctx, OFT_WARN,
3166 "learned port is unknown, dropping");
3167 } else {
3168 xlate_report(ctx, OFT_DETAIL,
3169 "learned port is input port, dropping");
3170 }
3171 } else {
3172 xlate_report(ctx, OFT_DETAIL,
3173 "no learned MAC for destination, flooding");
3174 xlate_normal_flood(ctx, in_xbundle, &xvlan);
3175 }
3176 }
3177 }
3178
3179 /* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
3180 * 'probability' is the number of packets out of UINT32_MAX to sample. The
3181 * 'cookie' is passed back in the callback for each sampled packet.
3182 * 'tunnel_out_port', if not ODPP_NONE, is added as the
3183 * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions',
3184 * an OVS_USERSPACE_ATTR_ACTIONS attribute is added. Passing a valid
3185 * 'tunnel_out_port' is how compose_ipfix_action() arranges for egress
3186 * tunnel information to be sampled.
3187 */
3189 static size_t
3190 compose_sample_action(struct xlate_ctx *ctx,
3191 const uint32_t probability,
3192 const struct user_action_cookie *cookie,
3193 const odp_port_t tunnel_out_port,
3194 bool include_actions)
3195 {
3196 if (probability == 0) {
3197 /* No need to generate sampling or the inner action. */
3198 return 0;
3199 }
3200
3201 /* If the slow path meter is configured by the controller,
3202 * insert a meter action before the user space action. */
3203 struct ofproto *ofproto = &ctx->xin->ofproto->up;
3204 uint32_t meter_id = ofproto->slowpath_meter_id;
3205
3206 /* When a meter action is not required, avoid generating a sample
3207 * action for a 100% sampling rate. */
3208 bool is_sample = probability < UINT32_MAX || meter_id != UINT32_MAX;
3209 size_t sample_offset = 0, actions_offset = 0;
3210 if (is_sample) {
3211 sample_offset = nl_msg_start_nested(ctx->odp_actions,
3212 OVS_ACTION_ATTR_SAMPLE);
3213 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
3214 probability);
3215 actions_offset = nl_msg_start_nested(ctx->odp_actions,
3216 OVS_SAMPLE_ATTR_ACTIONS);
3217 }
3218
3219 if (meter_id != UINT32_MAX) {
3220 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
3221 }
3222
3223 odp_port_t odp_port = ofp_port_to_odp_port(
3224 ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
3225 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
3226 size_t cookie_offset;
3227 int res = odp_put_userspace_action(pid, cookie, sizeof *cookie,
3228 tunnel_out_port, include_actions,
3229 ctx->odp_actions, &cookie_offset);
3230 ovs_assert(res == 0);
3231 if (is_sample) {
3232 nl_msg_end_nested(ctx->odp_actions, actions_offset);
3233 nl_msg_end_nested(ctx->odp_actions, sample_offset);
3234 }
3235
3236 return cookie_offset;
3237 }
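/* Illustrative shape of the generated datapath actions (not literal output):
 * with a 50% probability and a slow-path meter configured, the result is
 * roughly
 *
 *     sample(sample=50.0%,actions(meter(N),userspace(pid=...,userdata=...)))
 *
 * whereas with a 100% probability and no meter the sample() wrapper is
 * omitted and only the userspace() action is emitted. */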
3238
3239 /* If sFlow is not enabled, returns 0 without doing anything.
3240 *
3241 * If sFlow is enabled, appends a template "sample" action to the ODP actions
3242 * in 'ctx'. This action is a template because some of the information needed
3243 * to fill it out is not available until flow translation is complete. In this
3244 * case, this function returns an offset, which is always nonzero, to pass
3245 * later to fix_sflow_action() to fill in the rest of the template. */
3246 static size_t
3247 compose_sflow_action(struct xlate_ctx *ctx)
3248 {
3249 struct dpif_sflow *sflow = ctx->xbridge->sflow;
3250 if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3251 return 0;
3252 }
3253
3254 struct user_action_cookie cookie;
3255
3256 memset(&cookie, 0, sizeof cookie);
3257 cookie.type = USER_ACTION_COOKIE_SFLOW;
3258 cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
3259 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
3260
3261 return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
3262 &cookie, ODPP_NONE, true);
3263 }
3264
3265 /* If flow IPFIX is enabled, makes sure that the IPFIX flow sample action at
3266 * the egress point of a tunnel port is placed just before the corresponding
3267 * output action. If bridge IPFIX is enabled, appends an IPFIX sample action
3268 * to 'ctx->odp_actions'. */
3269 static void
3270 compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
3271 {
3272 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
3273 odp_port_t tunnel_out_port = ODPP_NONE;
3274
3275 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
3276 return;
3277 }
3278
3279 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
3280 * number. */
3281 if (output_odp_port == ODPP_NONE &&
3282 !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
3283 return;
3284 }
3285
3286 /* For output case, output_odp_port is valid. */
3287 if (output_odp_port != ODPP_NONE) {
3288 if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
3289 return;
3290 }
3291 /* If tunnel sampling is enabled, put an additional option attribute:
3292 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
3293 */
3294 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
3295 dpif_ipfix_is_tunnel_port(ipfix, output_odp_port) ) {
3296 tunnel_out_port = output_odp_port;
3297 }
3298 }
3299
3300 struct user_action_cookie cookie;
3301
3302 memset(&cookie, 0, sizeof cookie);
3303 cookie.type = USER_ACTION_COOKIE_IPFIX;
3304 cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
3305 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
3306 cookie.ipfix.output_odp_port = output_odp_port;
3307
3308 compose_sample_action(ctx,
3309 dpif_ipfix_get_bridge_exporter_probability(ipfix),
3310 &cookie, tunnel_out_port, false);
3311 }
3312
3313 /* Fix "sample" action according to data collected while composing ODP actions,
3314 * as described in compose_sflow_action().
3315 *
3316 * 'user_cookie_offset' must be the offset returned by
3317 * compose_sflow_action(). */
3318 static void
3319 fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
3320 {
3321 const struct flow *base = &ctx->base_flow;
3322 struct user_action_cookie *cookie;
3323
3324 cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset, sizeof *cookie);
3325 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
3326
3327 cookie->sflow.vlan_tci = base->vlans[0].tci;
3328
3329 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
3330 * port information") for the interpretation of cookie->output. */
3331 switch (ctx->sflow_n_outputs) {
3332 case 0:
3333 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
3334 cookie->sflow.output = 0x40000000 | 256;
3335 break;
3336
3337 case 1:
3338 cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
3339 ctx->xbridge->sflow, ctx->sflow_odp_port);
3340 if (cookie->sflow.output) {
3341 break;
3342 }
3343 /* Fall through. */
3344 default:
3345 /* 0x80000000 means "multiple output ports". */
3346 cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
3347 break;
3348 }
3349 }
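/* Worked example (illustrative): a flow that output to exactly one port with
 * ifindex 7 ends up with cookie->sflow.output == 7; a flow flooded to three
 * ports gets 0x80000000 | 3; and a flow that was dropped gets
 * 0x40000000 | 256, following the sFlow v5 "Input/output port information"
 * encoding referenced above. */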
3350
3351 static bool
3352 process_special(struct xlate_ctx *ctx, const struct xport *xport)
3353 {
3354 const struct flow *flow = &ctx->xin->flow;
3355 struct flow_wildcards *wc = ctx->wc;
3356 const struct xbridge *xbridge = ctx->xbridge;
3357 const struct dp_packet *packet = ctx->xin->packet;
3358 enum slow_path_reason slow;
3359 bool lacp_may_enable;
3360
3361 if (!xport) {
3362 slow = 0;
3363 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
3364 if (packet) {
3365 cfm_process_heartbeat(xport->cfm, packet);
3366 }
3367 slow = SLOW_CFM;
3368 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
3369 if (packet) {
3370 bfd_process_packet(xport->bfd, flow, packet);
3371 /* If a POLL is received, immediately send a FINAL back. */
3372 if (bfd_should_send_packet(xport->bfd)) {
3373 ofproto_dpif_monitor_port_send_soon(xport->ofport);
3374 }
3375 }
3376 slow = SLOW_BFD;
3377 } else if (xport->xbundle && xport->xbundle->lacp
3378 && flow->dl_type == htons(ETH_TYPE_LACP)) {
3379 if (packet) {
3380 lacp_may_enable = lacp_process_packet(xport->xbundle->lacp,
3381 xport->ofport, packet);
3382 /* Update LACP status in bond-member to avoid packet-drops
3383 * until LACP state machine is run by the main thread. */
3384 if (xport->xbundle->bond && lacp_may_enable) {
3385 bond_member_set_may_enable(xport->xbundle->bond, xport->ofport,
3386 lacp_may_enable);
3387 }
3388 }
3389 slow = SLOW_LACP;
3390 } else if ((xbridge->stp || xbridge->rstp) &&
3391 stp_should_process_flow(flow, wc)) {
3392 if (packet) {
3393 xbridge->stp
3394 ? stp_process_packet(xport, packet)
3395 : rstp_process_packet(xport, packet);
3396 }
3397 slow = SLOW_STP;
3398 } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
3399 if (packet) {
3400 lldp_process_packet(xport->lldp, packet);
3401 }
3402 slow = SLOW_LLDP;
3403 } else {
3404 slow = 0;
3405 }
3406
3407 if (slow) {
3408 ctx->xout->slow |= slow;
3409 return true;
3410 } else {
3411 return false;
3412 }
3413 }
3414
3415 static int
3416 tnl_route_lookup_flow(const struct xlate_ctx *ctx,
3417 const struct flow *oflow,
3418 struct in6_addr *ip, struct in6_addr *src,
3419 struct xport **out_port)
3420 {
3421 char out_dev[IFNAMSIZ];
3422 struct xbridge *xbridge;
3423 struct in6_addr gw;
3424 struct in6_addr dst;
3425
3426 dst = flow_tnl_dst(&oflow->tunnel);
3427 if (!ovs_router_lookup(oflow->pkt_mark, &dst, out_dev, src, &gw)) {
3428 return -ENOENT;
3429 }
3430
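/* If the route has a usable gateway, resolve the neighbor for the gateway
 * below; otherwise the tunnel destination is assumed to be on-link and is
 * resolved directly. */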
3431 if (ipv6_addr_is_set(&gw) &&
3432 (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
3433 *ip = gw;
3434 } else {
3435 *ip = dst;
3436 }
3437
3438 HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
3439 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
3440 struct xport *port;
3441
3442 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3443 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
3444 *out_port = port;
3445 return 0;
3446 }
3447 }
3448 }
3449 }
3450
3451 /* If the tunnel IP isn't configured on any bridge, then we search all ports. */
3452 HMAP_FOR_EACH (xbridge, hmap_node, &ctx->xcfg->xbridges) {
3453 struct xport *port;
3454
3455 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
3456 if (!strncmp(netdev_get_name(port->netdev),
3457 out_dev, IFNAMSIZ)) {
3458 *out_port = port;
3459 return 0;
3460 }
3461 }
3462 }
3463 return -ENOENT;
3464 }
3465
3466 static int
3467 compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
3468 struct dp_packet *packet)
3469 {
3470 struct xbridge *xbridge = out_dev->xbridge;
3471 ovs_version_t version = ofproto_dpif_get_tables_version(xbridge->ofproto);
3472 struct ofpact_output output;
3473 struct flow flow;
3474
3475 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
3476 flow_extract(packet, &flow);
3477 flow.in_port.ofp_port = out_dev->ofp_port;
3478 output.port = OFPP_TABLE;
3479 output.max_len = 0;
3480
3481 return ofproto_dpif_execute_actions__(xbridge->ofproto, version, &flow,
3482 NULL, &output.ofpact, sizeof output,
3483 ctx->depth, ctx->resubmits, packet);
3484 }
3485
3486 static void
3487 tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3488 const struct eth_addr eth_src,
3489 struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
3490 {
3491 struct dp_packet packet;
3492
3493 dp_packet_init(&packet, 0);
3494 compose_nd_ns(&packet, eth_src, ipv6_src, ipv6_dst);
3495 compose_table_xlate(ctx, out_dev, &packet);
3496 dp_packet_uninit(&packet);
3497 }
3498
3499 static void
3500 tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
3501 const struct eth_addr eth_src,
3502 ovs_be32 ip_src, ovs_be32 ip_dst)
3503 {
3504 struct dp_packet packet;
3505
3506 dp_packet_init(&packet, 0);
3507 compose_arp(&packet, ARP_OP_REQUEST,
3508 eth_src, eth_addr_zero, true, ip_src, ip_dst);
3509
3510 compose_table_xlate(ctx, out_dev, &packet);
3511 dp_packet_uninit(&packet);
3512 }
3513
3514 static void
3515 propagate_tunnel_data_to_flow__(struct flow *dst_flow,
3516 const struct flow *src_flow,
3517 struct eth_addr dmac, struct eth_addr smac,
3518 struct in6_addr s_ip6, ovs_be32 s_ip,
3519 bool is_tnl_ipv6, uint8_t nw_proto)
3520 {
3521 dst_flow->dl_dst = dmac;
3522 dst_flow->dl_src = smac;
3523
3524 dst_flow->packet_type = htonl(PT_ETH);
3525 dst_flow->nw_dst = src_flow->tunnel.ip_dst;
3526 dst_flow->nw_src = src_flow->tunnel.ip_src;
3527 dst_flow->ipv6_dst = src_flow->tunnel.ipv6_dst;
3528 dst_flow->ipv6_src = src_flow->tunnel.ipv6_src;
3529
3530 dst_flow->nw_frag = 0; /* Tunnel packets are unfragmented. */
3531 dst_flow->nw_tos = src_flow->tunnel.ip_tos;
3532 dst_flow->nw_ttl = src_flow->tunnel.ip_ttl;
3533 dst_flow->tp_dst = src_flow->tunnel.tp_dst;
3534 dst_flow->tp_src = src_flow->tunnel.tp_src;
3535
3536 if (is_tnl_ipv6) {
3537 dst_flow->dl_type = htons(ETH_TYPE_IPV6);
3538 if (ipv6_mask_is_any(&dst_flow->ipv6_src)
3539 && !ipv6_mask_is_any(&s_ip6)) {
3540 dst_flow->ipv6_src = s_ip6;
3541 }
3542 } else {
3543 dst_flow->dl_type = htons(ETH_TYPE_IP);
3544 if (dst_flow->nw_src == 0 && s_ip) {
3545 dst_flow->nw_src = s_ip;
3546 }
3547 }
3548 dst_flow->nw_proto = nw_proto;
3549 }
3550
3551 /*
3552 * Populate the 'flow' and 'base_flow' L3 fields to do the post tunnel push
3553 * translations.
3554 */
3555 static void
3556 propagate_tunnel_data_to_flow(struct xlate_ctx *ctx, struct eth_addr dmac,
3557 struct eth_addr smac, struct in6_addr s_ip6,
3558 ovs_be32 s_ip, bool is_tnl_ipv6,
3559 enum ovs_vport_type tnl_type)
3560 {
3561 struct flow *base_flow, *flow;
3562 flow = &ctx->xin->flow;
3563 base_flow = &ctx->base_flow;
3564 uint8_t nw_proto = 0;
3565
3566 switch (tnl_type) {
3567 case OVS_VPORT_TYPE_GRE:
3568 case OVS_VPORT_TYPE_ERSPAN:
3569 case OVS_VPORT_TYPE_IP6ERSPAN:
3570 case OVS_VPORT_TYPE_IP6GRE:
3571 nw_proto = IPPROTO_GRE;
3572 break;
3573 case OVS_VPORT_TYPE_VXLAN:
3574 case OVS_VPORT_TYPE_GENEVE:
3575 case OVS_VPORT_TYPE_GTPU:
3576 case OVS_VPORT_TYPE_BAREUDP:
3577 nw_proto = IPPROTO_UDP;
3578 break;
3579 case OVS_VPORT_TYPE_LISP:
3580 case OVS_VPORT_TYPE_STT:
3581 case OVS_VPORT_TYPE_UNSPEC:
3582 case OVS_VPORT_TYPE_NETDEV:
3583 case OVS_VPORT_TYPE_INTERNAL:
3584 case __OVS_VPORT_TYPE_MAX:
3585 default:
3586 OVS_NOT_REACHED();
3587 }
3588 /*
3589 * Update base_flow first, followed by flow, since dst_flow gets modified
3590 * in the function.
3591 */
3592 propagate_tunnel_data_to_flow__(base_flow, flow, dmac, smac, s_ip6, s_ip,
3593 is_tnl_ipv6, nw_proto);
3594 propagate_tunnel_data_to_flow__(flow, flow, dmac, smac, s_ip6, s_ip,
3595 is_tnl_ipv6, nw_proto);
3596 }
3597
3598 static int
3599 native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
3600 const struct flow *flow, odp_port_t tunnel_odp_port,
3601 bool truncate)
3602 {
3603 struct netdev_tnl_build_header_params tnl_params;
3604 struct ovs_action_push_tnl tnl_push_data;
3605 struct xport *out_dev = NULL;
3606 ovs_be32 s_ip = 0, d_ip = 0;
3607 struct in6_addr s_ip6 = in6addr_any;
3608 struct in6_addr d_ip6 = in6addr_any;
3609 struct eth_addr smac;
3610 struct eth_addr dmac;
3611 int err;
3612 char buf_sip6[INET6_ADDRSTRLEN];
3613 char buf_dip6[INET6_ADDRSTRLEN];
3614
3615 /* Store sFlow data. */
3616 uint32_t sflow_n_outputs = ctx->sflow_n_outputs;
3617
3618 /* Structures to backup Ethernet and IP of base_flow. */
3619 struct flow old_base_flow;
3620 struct flow old_flow;
3621
3622 /* Backup flow & base_flow data. */
3623 memcpy(&old_base_flow, &ctx->base_flow, sizeof old_base_flow);
3624 memcpy(&old_flow, &ctx->xin->flow, sizeof old_flow);
3625
3626 if (flow->tunnel.ip_src) {
3627 in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src);
3628 }
3629
3630 err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev);
3631 if (err) {
3632 xlate_report(ctx, OFT_WARN, "native tunnel routing failed");
3633 return err;
3634 }
3635
3636 xlate_report(ctx, OFT_DETAIL, "tunneling to %s via %s",
3637 ipv6_string_mapped(buf_dip6, &d_ip6),
3638 netdev_get_name(out_dev->netdev));
3639
3640 /* Use mac addr of bridge port of the peer. */
3641 err = netdev_get_etheraddr(out_dev->netdev, &smac);
3642 if (err) {
3643 xlate_report(ctx, OFT_WARN,
3644 "tunnel output device lacks Ethernet address");
3645 return err;
3646 }
3647
3648 d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
3649 if (d_ip) {
3650 s_ip = in6_addr_get_mapped_ipv4(&s_ip6);
3651 }
3652
3653 err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
3654 if (err) {
3655 xlate_report(ctx, OFT_DETAIL,
3656 "neighbor cache miss for %s on bridge %s, "
3657 "sending %s request",
3658 buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
3659 if (d_ip) {
3660 tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
3661 } else {
3662 tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
3663 }
3664 return err;
3665 }
3666
3667 if (ctx->xin->xcache) {
3668 struct xc_entry *entry;
3669
3670 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
3671 ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
3672 sizeof entry->tnl_neigh_cache.br_name);
3673 entry->tnl_neigh_cache.d_ipv6 = d_ip6;
3674 }
3675
3676 xlate_report(ctx, OFT_DETAIL, "tunneling from "ETH_ADDR_FMT" %s"
3677 " to "ETH_ADDR_FMT" %s",
3678 ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
3679 ETH_ADDR_ARGS(dmac), buf_dip6);
3680
3681 netdev_init_tnl_build_header_params(&tnl_params, flow, &s_ip6, dmac, smac);
3682 err = tnl_port_build_header(xport->ofport, &tnl_push_data, &tnl_params);
3683 if (err) {
3684 return err;
3685 }
3686 tnl_push_data.tnl_port = tunnel_odp_port;
3687 tnl_push_data.out_port = out_dev->odp_port;
3688
3689 /* After the tunnel header has been added, the MAC and IP data of flow and
3690 * base_flow need to be set properly, since there is no recirculation
3691 * anymore when sending the packet to the tunnel. */
3692
3693 propagate_tunnel_data_to_flow(ctx, dmac, smac, s_ip6,
3694 s_ip, tnl_params.is_ipv6,
3695 tnl_push_data.tnl_type);
3696
3697 size_t clone_ofs = 0;
3698 size_t push_action_size;
3699
3700 clone_ofs = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
3701 odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
3702 push_action_size = ctx->odp_actions->size;
3703
3704 if (!truncate) {
3705 const struct dpif_flow_stats *backup_resubmit_stats;
3706 struct xlate_cache *backup_xcache;
3707 struct flow_wildcards *backup_wc, wc;
3708 bool backup_side_effects;
3709 const struct dp_packet *backup_packet;
3710
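/* Translate through the other bridge with scratch wildcards, a temporary
 * xlate cache and side effects disabled, so that this nested translation
 * does not disturb the caller's translation state. */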
3711 memset(&wc, 0, sizeof wc);
3712 backup_wc = ctx->wc;
3713 ctx->wc = &wc;
3714 ctx->xin->wc = NULL;
3715 backup_resubmit_stats = ctx->xin->resubmit_stats;
3716 backup_xcache = ctx->xin->xcache;
3717 backup_side_effects = ctx->xin->allow_side_effects;
3718 backup_packet = ctx->xin->packet;
3719
3720 ctx->xin->resubmit_stats = NULL;
3721 ctx->xin->xcache = xlate_cache_new(); /* Use new temporary cache. */
3722 ctx->xin->allow_side_effects = false;
3723 ctx->xin->packet = NULL;
3724
3725 /* Push the cache entry for the tunnel first. */
3726 struct xc_entry *entry;
3727 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TUNNEL_HEADER);
3728 entry->tunnel_hdr.hdr_size = tnl_push_data.header_len;
3729 entry->tunnel_hdr.operation = ADD;
3730
3731 patch_port_output(ctx, xport, out_dev);
3732
3733 /* Similar to the stats update in revalidation, the xcache entries
3734 * populated by the previous translation are used to update the
3735 * stats correctly.
3736 */
3737 if (backup_resubmit_stats) {
3738 struct dpif_flow_stats stats = *backup_resubmit_stats;
3739 xlate_push_stats(ctx->xin->xcache, &stats, false);
3740 }
3741 xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
3742
3743 if (ctx->odp_actions->size > push_action_size) {
3744 nl_msg_end_non_empty_nested(ctx->odp_actions, clone_ofs);
3745 } else {
3746 nl_msg_cancel_nested(ctx->odp_actions, clone_ofs);
3747 }
3748
3749 /* Restore context status. */
3750 ctx->xin->resubmit_stats = backup_resubmit_stats;
3751 xlate_cache_delete(ctx->xin->xcache);
3752 ctx->xin->xcache = backup_xcache;
3753 ctx->xin->allow_side_effects = backup_side_effects;
3754 ctx->xin->packet = backup_packet;
3755 ctx->wc = backup_wc;
3756 } else {
3757 /* In order to maintain accurate stats, use recirc for
3758 * native tunneling. */
3759 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, 0);
3760 nl_msg_end_nested(ctx->odp_actions, clone_ofs);
3761 }
3762
3763 /* Restore the flows after the translation. */
3764 memcpy(&ctx->xin->flow, &old_flow, sizeof ctx->xin->flow);
3765 memcpy(&ctx->base_flow, &old_base_flow, sizeof ctx->base_flow);
3766
3767 /* Restore sFlow data. */
3768 ctx->sflow_n_outputs = sflow_n_outputs;
3769
3770 return 0;
3771 }
3772
3773 static void
3774 xlate_commit_actions(struct xlate_ctx *ctx)
3775 {
3776 bool use_masked = ctx->xbridge->support.masked_set_action;
3777
3778 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3779 ctx->odp_actions, ctx->wc,
3780 use_masked, ctx->pending_encap,
3781 ctx->pending_decap, ctx->encap_data);
3782 ctx->pending_encap = false;
3783 ctx->pending_decap = false;
3784 ofpbuf_delete(ctx->encap_data);
3785 ctx->encap_data = NULL;
3786 }
3787
3788 static void
3789 clear_conntrack(struct xlate_ctx *ctx)
3790 {
3791 ctx->conntracked = false;
3792 flow_clear_conntrack(&ctx->xin->flow);
3793 }
3794
3795 static bool
3796 xlate_flow_is_protected(const struct xlate_ctx *ctx, const struct flow *flow, const struct xport *xport_out)
3797 {
3798 const struct xport *xport_in;
3799
3800 if (!xport_out) {
3801 return false;
3802 }
3803
3804 xport_in = get_ofp_port(ctx->xbridge, flow->in_port.ofp_port);
3805
3806 return (xport_in && xport_in->xbundle && xport_out->xbundle &&
3807 xport_in->xbundle->protected && xport_out->xbundle->protected);
3808 }
3809
3810 /* Handles the case where a packet is sent from one bridge to another bridge.
3811 *
3812 * The bridges are internally connected, either with patch ports or with
3813 * tunnel ports.
3814 *
3815 * The output action to another bridge causes translation to continue within
3816 * the next bridge. This process can be recursive; the next bridge can
3817 * output yet to another bridge.
3818 *
3819 * The translated actions from the second bridge onwards are enclosed within
3820 * the clone action, so that any modification to the packet will not be visible
3821 * to the remaining actions of the originating bridge.
3822 */
3823 static void
3824 patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
3825 struct xport *out_dev)
3826 {
3827 struct flow *flow = &ctx->xin->flow;
3828 struct flow old_flow = ctx->xin->flow;
3829 struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
3830 bool old_conntrack = ctx->conntracked;
3831 bool old_was_mpls = ctx->was_mpls;
3832 ovs_version_t old_version = ctx->xin->tables_version;
3833 struct ofpbuf old_stack = ctx->stack;
3834 uint8_t new_stack[1024];
3835 struct ofpbuf old_action_set = ctx->action_set;
3836 struct ovs_list *old_trace = ctx->xin->trace;
3837 uint64_t actset_stub[1024 / 8];
3838
3839 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
3840 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
3841 flow->in_port.ofp_port = out_dev->ofp_port;
3842 flow->metadata = htonll(0);
3843 memset(&flow->tunnel, 0, sizeof flow->tunnel);
3844 memset(&ctx->wc->masks.tunnel, 0, sizeof ctx->wc->masks.tunnel);
3845 flow->tunnel.metadata.tab =
3846 ofproto_get_tun_tab(&out_dev->xbridge->ofproto->up);
3847 ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
3848 memset(flow->regs, 0, sizeof flow->regs);
3849 flow->actset_output = OFPP_UNSET;
3850 clear_conntrack(ctx);
3851 ctx->xin->trace = xlate_report(ctx, OFT_BRIDGE, "bridge(\"%s\")",
3852 out_dev->xbridge->name);
3853 mirror_mask_t old_mirrors = ctx->mirrors;
3854 bool independent_mirrors = out_dev->xbridge != ctx->xbridge;
3855 if (independent_mirrors) {
3856 ctx->mirrors = 0;
3857 }
3858 ctx->xbridge = out_dev->xbridge;
3859
3860 /* The bridge is now known so obtain its table version. */
3861 ctx->xin->tables_version
3862 = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
3863
3864 if (!process_special(ctx, out_dev) && may_receive(out_dev, ctx)) {
3865 if (xport_stp_forward_state(out_dev) &&
3866 xport_rstp_forward_state(out_dev)) {
3867 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3868 false, true, clone_xlate_actions);
3869 if (!ctx->freezing) {
3870 xlate_action_set(ctx);
3871 }
3872 if (ctx->freezing) {
3873 finish_freezing(ctx);
3874 }
3875 } else {
3876 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
3877 * the learning action look at the packet, then drop it. */
3878 struct flow old_base_flow = ctx->base_flow;
3879 size_t old_size = ctx->odp_actions->size;
3880 mirror_mask_t old_mirrors2 = ctx->mirrors;
3881
3882 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
3883 false, true, clone_xlate_actions);
3884 ctx->mirrors = old_mirrors2;
3885 ctx->base_flow = old_base_flow;
3886 ctx->odp_actions->size = old_size;
3887
3888 /* Undo changes that may have been done for freezing. */
3889 ctx_cancel_freeze(ctx);
3890 }
3891 }
3892
3893 ctx->xin->trace = old_trace;
3894 if (independent_mirrors) {
3895 ctx->mirrors = old_mirrors;
3896 }
3897 ctx->xin->flow = old_flow;
3898 ctx->xbridge = in_dev->xbridge;
3899 ofpbuf_uninit(&ctx->action_set);
3900 ctx->action_set = old_action_set;
3901 ofpbuf_uninit(&ctx->stack);
3902 ctx->stack = old_stack;
3903
3904 /* Restore calling bridge's lookup version. */
3905 ctx->xin->tables_version = old_version;
3906
3907 /* Restore the calling bridge's tunneling information. */
3908 ctx->wc->masks.tunnel = old_flow_tnl_wc;
3909
3910 /* The out bridge popping MPLS should have no effect on the original
3911 * bridge. */
3912 ctx->was_mpls = old_was_mpls;
3913
3914 /* The out bridge's conntrack execution should have no effect on the
3915 * original bridge. */
3916 ctx->conntracked = old_conntrack;
3917
3918 /* The fact that the out bridge exits (for any reason) does not mean
3919 * that the original bridge should exit. Specifically, if the out
3920 * bridge freezes translation, the original bridge must continue
3921 * processing with the original, not the frozen packet! */
3922 ctx->exit = false;
3923
3924 /* Out bridge errors do not propagate back. */
3925 ctx->error = XLATE_OK;
3926
3927 if (ctx->xin->resubmit_stats) {
3928 netdev_vport_inc_tx(in_dev->netdev, ctx->xin->resubmit_stats);
3929 netdev_vport_inc_rx(out_dev->netdev, ctx->xin->resubmit_stats);
3930 if (out_dev->bfd) {
3931 bfd_account_rx(out_dev->bfd, ctx->xin->resubmit_stats);
3932 }
3933 }
3934 if (ctx->xin->xcache) {
3935 struct xc_entry *entry;
3936
3937 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
3938 entry->dev.tx = netdev_ref(in_dev->netdev);
3939 entry->dev.rx = netdev_ref(out_dev->netdev);
3940 entry->dev.bfd = bfd_ref(out_dev->bfd);
3941 }
3942 }
3943
3944 static bool
3945 check_output_prerequisites(struct xlate_ctx *ctx,
3946 const struct xport *xport,
3947 struct flow *flow,
3948 bool check_stp)
3949 {
3950 struct flow_wildcards *wc = ctx->wc;
3951
3952 if (!xport) {
3953 xlate_report(ctx, OFT_WARN, "Nonexistent output port");
3954 return false;
3955 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
3956 xlate_report(ctx, OFT_DETAIL, "OFPPC_NO_FWD set, skipping output");
3957 return false;
3958 } else if (ctx->mirror_snaplen != 0 && xport->odp_port == ODPP_NONE) {
3959 xlate_report(ctx, OFT_WARN,
3960 "Mirror truncate to ODPP_NONE, skipping output");
3961 return false;
3962 } else if (xlate_flow_is_protected(ctx, flow, xport)) {
3963 xlate_report(ctx, OFT_WARN,
3964 "Flow is between protected ports, skipping output.");
3965 return false;
3966 } else if (check_stp) {
3967 if (is_stp(&ctx->base_flow)) {
3968 if (!xport_stp_should_forward_bpdu(xport) &&
3969 !xport_rstp_should_manage_bpdu(xport)) {
3970 if (ctx->xbridge->stp != NULL) {
3971 xlate_report(ctx, OFT_WARN,
3972 "STP not in listening state, "
3973 "skipping bpdu output");
3974 } else if (ctx->xbridge->rstp != NULL) {
3975 xlate_report(ctx, OFT_WARN,
3976 "RSTP not managing BPDU in this state, "
3977 "skipping bpdu output");
3978 }
3979 return false;
3980 }
3981 } else if ((xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc))
3982 || (xport->bfd && bfd_should_process_flow(xport->bfd, flow,
3983 wc))) {
3984 /* Pass; STP should not block link health detection. */
3985 } else if (!xport_stp_forward_state(xport) ||
3986 !xport_rstp_forward_state(xport)) {
3987 if (ctx->xbridge->stp != NULL) {
3988 xlate_report(ctx, OFT_WARN,
3989 "STP not in forwarding state, skipping output");
3990 } else if (ctx->xbridge->rstp != NULL) {
3991 xlate_report(ctx, OFT_WARN,
3992 "RSTP not in forwarding state, skipping output");
3993 }
3994 return false;
3995 }
3996 }
3997
3998 if (xport->pt_mode == NETDEV_PT_LEGACY_L2 &&
3999 flow->packet_type != htonl(PT_ETH)) {
4000 xlate_report(ctx, OFT_WARN, "Trying to send non-Ethernet packet "
4001 "through legacy L2 port. Dropping packet.");
4002 return false;
4003 }
4004
4005 return true;
4006 }
4007
4008 /* This function verifies that the destination address of a received Neighbor
4009 * Advertisement message stored in 'flow' is correct. It should be either
4010 * FF02::1:FFXX:XXXX, where XX:XXXX stands for the last 24 bits of 'ipv6_addr',
4011 * or it should match 'ipv6_addr'. */
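/* For example, for an 'ipv6_addr' of fe80::1234:5678, the matching
 * solicited-node multicast address is ff02::1:ff34:5678. */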
4012 static bool
4013 is_nd_dst_correct(const struct flow *flow, const struct in6_addr *ipv6_addr)
4014 {
4015 const uint8_t *flow_ipv6_addr = (uint8_t *) &flow->ipv6_dst;
4016 const uint8_t *addr = (uint8_t *) ipv6_addr;
4017
4018 return (IN6_IS_ADDR_MC_LINKLOCAL(&flow->ipv6_dst) &&
4019 flow_ipv6_addr[11] == 0x01 &&
4020 flow_ipv6_addr[12] == 0xff &&
4021 flow_ipv6_addr[13] == addr[13] &&
4022 flow_ipv6_addr[14] == addr[14] &&
4023 flow_ipv6_addr[15] == addr[15]) ||
4024 IN6_ARE_ADDR_EQUAL(&flow->ipv6_dst, ipv6_addr);
4025 }
4026
4027 static bool
4028 is_neighbor_reply_matched(const struct flow *flow, struct in6_addr *ip_addr)
4029 {
4030 return ((IN6_IS_ADDR_V4MAPPED(ip_addr) &&
4031 flow->dl_type == htons(ETH_TYPE_ARP) &&
4032 in6_addr_get_mapped_ipv4(ip_addr) == flow->nw_dst) ||
4033 (!IN6_IS_ADDR_V4MAPPED(ip_addr) &&
4034 is_nd_dst_correct(flow, ip_addr)));
4035 }
4036
4037 /* This function verifies whether the ARP reply or Neighbor Advertisement
4038 * represented by 'flow' addresses the 'xbridge' of 'ctx'. It returns true if
4039 * the ARP target address or the neighbor discovery destination is in the list
4040 * of IP addresses configured on the bridge; otherwise it returns false. */
4041 static bool
4042 is_neighbor_reply_correct(const struct xlate_ctx *ctx, const struct flow *flow)
4043 {
4044 bool ret = false;
4045 int i;
4046 struct xbridge_addr *xbridge_addr = xbridge_addr_ref(ctx->xbridge->addr);
4047
4048 /* Verify if 'nw_dst' of ARP or 'ipv6_dst' of ICMPV6 is in the list. */
4049 for (i = 0; xbridge_addr && i < xbridge_addr->n_addr; i++) {
4050 struct in6_addr *ip_addr = &xbridge_addr->addr[i];
4051 if (is_neighbor_reply_matched(flow, ip_addr)) {
4052 /* Found a match. */
4053 ret = true;
4054 break;
4055 }
4056 }
4057
4058 xbridge_addr_unref(xbridge_addr);
4059
4060 /* If not found in bridge's IPs, search in its ports. */
4061 if (!ret) {
4062 struct in6_addr *ip_addr, *mask;
4063 struct xport *port;
4064 int error, n_in6;
4065
4066 HMAP_FOR_EACH (port, ofp_node, &ctx->xbridge->xports) {
4067 error = netdev_get_addr_list(port->netdev, &ip_addr,
4068 &mask, &n_in6);
4069 if (!error) {
4070 ret = is_neighbor_reply_matched(flow, ip_addr);
4071 free(ip_addr);
4072 free(mask);
4073 if (ret) {
4074 /* Found a match. */
4075 break;
4076 }
4077 }
4078 }
4079 }
4080 return ret;
4081 }
4082
4083 static bool
4084 terminate_native_tunnel(struct xlate_ctx *ctx, struct flow *flow,
4085 struct flow_wildcards *wc, odp_port_t *tnl_port)
4086 {
4087 *tnl_port = ODPP_NONE;
4088
4089 /* XXX: Write a better filter for tunnel ports. We could use in_port
4090 * in the tunnel-port flow to avoid these checks completely. */
4091 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
4092 *tnl_port = tnl_port_map_lookup(flow, wc);
4093
4094 /* If no tunnel port was found and it's about an ARP or ICMPv6 packet,
4095 * do tunnel neighbor snooping. */
4096 if (*tnl_port == ODPP_NONE &&
4097 (flow->dl_type == htons(ETH_TYPE_ARP) ||
4098 flow->nw_proto == IPPROTO_ICMPV6) &&
4099 is_neighbor_reply_correct(ctx, flow)) {
4100 tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
4101 }
4102 }
4103
4104 return *tnl_port != ODPP_NONE;
4105 }
4106
4107 static void
4108 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
4109 const struct xlate_bond_recirc *xr, bool check_stp,
4110 bool is_last_action OVS_UNUSED, bool truncate)
4111 {
4112 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
4113 struct flow_wildcards *wc = ctx->wc;
4114 struct flow *flow = &ctx->xin->flow;
4115 struct flow_tnl flow_tnl;
4116 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
4117 uint8_t flow_nw_tos;
4118 odp_port_t out_port, odp_port, odp_tnl_port;
4119 bool is_native_tunnel = false;
4120 uint8_t dscp;
4121 struct eth_addr flow_dl_dst = flow->dl_dst;
4122 struct eth_addr flow_dl_src = flow->dl_src;
4123 ovs_be32 flow_packet_type = flow->packet_type;
4124 ovs_be16 flow_dl_type = flow->dl_type;
4125
4126 /* If 'struct flow' gets additional metadata, we'll need to zero it out
4127 * before traversing a patch port. */
4128 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
4129 memset(&flow_tnl, 0, sizeof flow_tnl);
4130
4131 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
4132 return;
4133 }
4134
4135 if (flow->packet_type == htonl(PT_ETH)) {
4136 /* Strip Ethernet header for legacy L3 port. */
4137 if (xport->pt_mode == NETDEV_PT_LEGACY_L3) {
4138 flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
4139 ntohs(flow->dl_type));
4140 }
4141 }
4142
4143 if (xport->peer) {
4144 if (truncate) {
4145 xlate_report_error(ctx, "Cannot truncate output to patch port");
4146 }
4147 patch_port_output(ctx, xport, xport->peer);
4148 return;
4149 }
4150
4151 memcpy(flow_vlans, flow->vlans, sizeof flow_vlans);
4152 flow_nw_tos = flow->nw_tos;
4153
4154 if (count_skb_priorities(xport)) {
4155 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
4156 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
4157 wc->masks.nw_tos |= IP_DSCP_MASK;
4158 flow->nw_tos &= ~IP_DSCP_MASK;
4159 flow->nw_tos |= dscp;
4160 }
4161 }
4162
4163 if (xport->is_tunnel) {
4164 struct in6_addr dst;
4165 /* Save tunnel metadata so that changes made due to
4166 * the Logical (tunnel) Port are not visible for any further
4167 * matches, while explicit set actions on tunnel metadata are.
4168 */
4169 flow_tnl = flow->tunnel;
4170 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
4171 if (odp_port == ODPP_NONE) {
4172 xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
4173 goto out; /* restore flow_nw_tos */
4174 }
4175 dst = flow_tnl_dst(&flow->tunnel);
4176 if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
4177 xlate_report(ctx, OFT_WARN, "Not tunneling to our own address");
4178 goto out; /* restore flow_nw_tos */
4179 }
4180 if (ctx->xin->resubmit_stats) {
4181 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
4182 }
4183 if (ctx->xin->xcache) {
4184 struct xc_entry *entry;
4185
4186 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
4187 entry->dev.tx = netdev_ref(xport->netdev);
4188 }
4189 out_port = odp_port;
4190 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
4191 xlate_report(ctx, OFT_DETAIL, "output to native tunnel");
4192 is_native_tunnel = true;
4193 } else {
4194 const char *tnl_type;
4195
4196 xlate_report(ctx, OFT_DETAIL, "output to kernel tunnel");
4197 tnl_type = tnl_port_get_type(xport->ofport);
4198 commit_odp_tunnel_action(flow, &ctx->base_flow,
4199 ctx->odp_actions, tnl_type);
4200 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
4201 }
4202 } else {
4203 odp_port = xport->odp_port;
4204 out_port = odp_port;
4205 }
4206
4207 if (out_port != ODPP_NONE) {
4208 /* Commit accumulated flow updates before output. */
4209 xlate_commit_actions(ctx);
4210
4211 if (xr && bond_use_lb_output_action(xport->xbundle->bond)) {
4212 /*
4213 * If the bond mode is balance-tcp and balance-tcp optimization is
4214 * enabled, then use the hash directly for member selection and avoid
4215 * recirculation.
4216 *
4217 * Currently supported for the netdev datapath only.
4218 */
4219 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_LB_OUTPUT,
4220 xr->recirc_id);
4221 } else if (xr) {
4222 /* Recirculate the packet. */
4223 struct ovs_action_hash *act_hash;
4224
4225 /* Hash action. */
4226 enum ovs_hash_alg hash_alg = xr->hash_alg;
4227 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4228 /* Algorithm supported by all datapaths. */
4229 hash_alg = OVS_HASH_ALG_L4;
4230 }
4231 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4232 OVS_ACTION_ATTR_HASH,
4233 sizeof *act_hash);
4234 act_hash->hash_alg = hash_alg;
4235 act_hash->hash_basis = xr->hash_basis;
4236
4237 /* Recirc action. */
4238 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
4239 xr->recirc_id);
4240 } else if (is_native_tunnel) {
4241 /* Output to native tunnel port. */
4242 native_tunnel_output(ctx, xport, flow, odp_port, truncate);
4243 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
4244
4245 } else if (terminate_native_tunnel(ctx, flow, wc,
4246 &odp_tnl_port)) {
4247 /* Intercept packet to be received on native tunnel port. */
4248 nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP,
4249 odp_tnl_port);
4250
4251 } else {
4252 /* Tunnel push-pop action is not compatible with
4253 * IPFIX action. */
4254 compose_ipfix_action(ctx, out_port);
4255
4256 /* Handle truncation of the mirrored packet. */
4257 if (ctx->mirror_snaplen > 0 &&
4258 ctx->mirror_snaplen < UINT16_MAX) {
4259 struct ovs_action_trunc *trunc;
4260
4261 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
4262 OVS_ACTION_ATTR_TRUNC,
4263 sizeof *trunc);
4264 trunc->max_len = ctx->mirror_snaplen;
4265 if (!ctx->xbridge->support.trunc) {
4266 ctx->xout->slow |= SLOW_ACTION;
4267 }
4268 }
4269
4270 nl_msg_put_odp_port(ctx->odp_actions,
4271 OVS_ACTION_ATTR_OUTPUT,
4272 out_port);
4273 }
4274
4275 ctx->sflow_odp_port = odp_port;
4276 ctx->sflow_n_outputs++;
4277 ctx->nf_output_iface = ofp_port;
4278 }
4279
4280 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
4281 mirror_packet(ctx, xport->xbundle,
4282 xbundle_mirror_dst(xport->xbundle->xbridge,
4283 xport->xbundle));
4284 }
4285
4286 out:
4287 /* Restore flow */
4288 memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
4289 flow->nw_tos = flow_nw_tos;
4290 flow->dl_dst = flow_dl_dst;
4291 flow->dl_src = flow_dl_src;
4292 flow->packet_type = flow_packet_type;
4293 flow->dl_type = flow_dl_type;
4294 }
4295
4296 static void
4297 compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
4298 const struct xlate_bond_recirc *xr,
4299 bool is_last_action, bool truncate)
4300 {
4301 compose_output_action__(ctx, ofp_port, xr, true,
4302 is_last_action, truncate);
4303 }
4304
4305 static void
4306 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
4307 bool deepens, bool is_last_action,
4308 xlate_actions_handler *actions_xlator)
4309 {
4310 struct rule_dpif *old_rule = ctx->rule;
4311 ovs_be64 old_cookie = ctx->rule_cookie;
4312 const struct rule_actions *actions;
4313
4314 if (ctx->xin->resubmit_stats) {
4315 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats, false);
4316 }
4317
4318 ctx->resubmits++;
4319
4320 ctx->depth += deepens;
4321 ctx->rule = rule;
4322 ctx->rule_cookie = rule->up.flow_cookie;
4323 actions = rule_get_actions(&rule->up);
4324 actions_xlator(actions->ofpacts, actions->ofpacts_len, ctx,
4325 is_last_action, false);
4326 ctx->rule_cookie = old_cookie;
4327 ctx->rule = old_rule;
4328 ctx->depth -= deepens;
4329 }
4330
4331 static bool
4332 xlate_resubmit_resource_check(struct xlate_ctx *ctx)
4333 {
4334 if (ctx->depth >= MAX_DEPTH) {
4335 xlate_report_error(ctx, "over max translation depth %d", MAX_DEPTH);
4336 ctx->error = XLATE_RECURSION_TOO_DEEP;
4337 } else if (ctx->resubmits >= MAX_RESUBMITS) {
4338 xlate_report_error(ctx, "over %d resubmit actions", MAX_RESUBMITS);
4339 ctx->error = XLATE_TOO_MANY_RESUBMITS;
4340 } else if (ctx->odp_actions->size > UINT16_MAX) {
4341 xlate_report_error(ctx, "resubmits yielded over 64 kB of actions");
4342 /* NOT an error, as we'll be slow-pathing the flow in this case? */
4343 ctx->exit = true; /* XXX: translation still terminated! */
4344 } else if (ctx->stack.size >= 65536) {
4345 xlate_report_error(ctx, "resubmits yielded over 64 kB of stack");
4346 ctx->error = XLATE_STACK_TOO_DEEP;
4347 } else {
4348 return true;
4349 }
4350
4351 return false;
4352 }
4353
4354 static void
4355 tuple_swap_flow(struct flow *flow, bool ipv4)
4356 {
4357 uint8_t nw_proto = flow->nw_proto;
4358 flow->nw_proto = flow->ct_nw_proto;
4359 flow->ct_nw_proto = nw_proto;
4360
4361 if (ipv4) {
4362 ovs_be32 nw_src = flow->nw_src;
4363 flow->nw_src = flow->ct_nw_src;
4364 flow->ct_nw_src = nw_src;
4365
4366 ovs_be32 nw_dst = flow->nw_dst;
4367 flow->nw_dst = flow->ct_nw_dst;
4368 flow->ct_nw_dst = nw_dst;
4369 } else {
4370 struct in6_addr ipv6_src = flow->ipv6_src;
4371 flow->ipv6_src = flow->ct_ipv6_src;
4372 flow->ct_ipv6_src = ipv6_src;
4373
4374 struct in6_addr ipv6_dst = flow->ipv6_dst;
4375 flow->ipv6_dst = flow->ct_ipv6_dst;
4376 flow->ct_ipv6_dst = ipv6_dst;
4377 }
4378
4379 ovs_be16 tp_src = flow->tp_src;
4380 flow->tp_src = flow->ct_tp_src;
4381 flow->ct_tp_src = tp_src;
4382
4383 ovs_be16 tp_dst = flow->tp_dst;
4384 flow->tp_dst = flow->ct_tp_dst;
4385 flow->ct_tp_dst = tp_dst;
4386 }
4387
4388 static void
4389 tuple_swap(struct flow *flow, struct flow_wildcards *wc)
4390 {
4391 bool ipv4 = (flow->dl_type == htons(ETH_TYPE_IP));
4392
4393 tuple_swap_flow(flow, ipv4);
4394 tuple_swap_flow(&wc->masks, ipv4);
4395 }
4396
4397 static void
4398 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
4399 bool may_packet_in, bool honor_table_miss,
4400 bool with_ct_orig, bool is_last_action,
4401 xlate_actions_handler *xlator)
4402 {
4403 /* Check if we need to recirculate before matching in a table. */
4404 if (ctx->was_mpls) {
4405 ctx_trigger_freeze(ctx);
4406 return;
4407 }
4408 if (xlate_resubmit_resource_check(ctx)) {
4409 uint8_t old_table_id = ctx->table_id;
4410 struct rule_dpif *rule;
4411
4412 ctx->table_id = table_id;
4413
4414 /* Swap packet fields with CT 5-tuple if requested. */
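/* The table lookup below then matches on the conntrack original-direction
 * 5-tuple; the fields are swapped back right after the lookup. */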
4415 if (with_ct_orig) {
4416 /* Do not swap if there is no CT tuple, or if key is not IP. */
4417 if (ctx->xin->flow.ct_nw_proto == 0 ||
4418 !is_ip_any(&ctx->xin->flow)) {
4419 xlate_report_error(ctx,
4420 "resubmit(ct) with non-tracked or non-IP packet!");
4421 ctx->table_id = old_table_id;
4422 return;
4423 }
4424 tuple_swap(&ctx->xin->flow, ctx->wc);
4425 }
4426 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
4427 ctx->xin->tables_version,
4428 &ctx->xin->flow, ctx->wc,
4429 ctx->xin->resubmit_stats,
4430 &ctx->table_id, in_port,
4431 may_packet_in, honor_table_miss,
4432 ctx->xin->xcache);
4433 /* Swap back. */
4434 if (with_ct_orig) {
4435 tuple_swap(&ctx->xin->flow, ctx->wc);
4436 }
4437
4438 if (rule) {
4439 /* Fill in the cache entry here instead of xlate_recursively
4440 * to make the reference counting more explicit. We take a
4441 * reference in the lookups above if we are going to cache the
4442 * rule. */
4443 if (ctx->xin->xcache) {
4444 struct xc_entry *entry;
4445
4446 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
4447 entry->rule = rule;
4448 ofproto_rule_ref(&rule->up);
4449 }
4450
4451 struct ovs_list *old_trace = ctx->xin->trace;
4452 xlate_report_table(ctx, rule, table_id);
4453 xlate_recursively(ctx, rule, table_id <= old_table_id,
4454 is_last_action, xlator);
4455 ctx->xin->trace = old_trace;
4456 }
4457
4458 ctx->table_id = old_table_id;
4459 return;
4460 }
4461 }
4462
4463 /* Consumes the group reference, which is only taken if xcache exists. */
4464 static void
4465 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
4466 struct ofputil_bucket *bucket)
4467 {
4468 if (ctx->xin->resubmit_stats) {
4469 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
4470 }
4471 if (ctx->xin->xcache) {
4472 struct xc_entry *entry;
4473
4474 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
4475 entry->group.group = group;
4476 entry->group.bucket = bucket;
4477 }
4478 }
4479
4480 static void
4481 xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket,
4482 bool is_last_action)
4483 {
4484 struct ovs_list *old_trace = ctx->xin->trace;
4485 if (OVS_UNLIKELY(ctx->xin->trace)) {
4486 char *s = xasprintf("bucket %"PRIu32, bucket->bucket_id);
4487 ctx->xin->trace = &oftrace_report(ctx->xin->trace, OFT_BUCKET,
4488 s)->subs;
4489 free(s);
4490 }
4491
4492 uint64_t action_list_stub[1024 / 8];
4493 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
4494 struct ofpbuf action_set = ofpbuf_const_initializer(bucket->ofpacts,
4495 bucket->ofpacts_len);
4496 struct flow old_flow = ctx->xin->flow;
4497 bool old_was_mpls = ctx->was_mpls;
4498
4499 ofpacts_execute_action_set(&action_list, &action_set);
4500 ctx->depth++;
4501 do_xlate_actions(action_list.data, action_list.size, ctx, is_last_action,
4502 true);
4503 ctx->depth--;
4504
4505 ofpbuf_uninit(&action_list);
4506
4507 /* Check if we need to freeze. */
4508 if (ctx->freezing) {
4509 finish_freezing(ctx);
4510 }
4511
4512 /* Roll back flow to previous state.
4513 * This is equivalent to cloning the packet for each bucket.
4514 *
4515 * As a side effect any subsequently applied actions will
4516 * also effectively be applied to a clone of the packet taken
4517 * just before applying the all or indirect group.
4518 *
4519 * Note that group buckets are action sets, hence they cannot modify the
4520 * main action set. Also any stack actions are ignored when executing an
4521 * action set, so group buckets cannot directly change the stack either.
4522 * However, we do allow resubmit actions in group buckets, which could
4523 * recursively execute actions that do modify the action set or change the
4524 * stack. The controller must be careful about what it does to the
4525 * action_set and stack in the tables resubmitted to from group buckets. */
4526 ctx->xin->flow = old_flow;
4527
4528 /* The group bucket popping MPLS should have no effect after bucket
4529 * execution. */
4530 ctx->was_mpls = old_was_mpls;
4531
4532 /* The fact that the group bucket exits (for any reason) does not mean that
4533 * the translation after the group action should exit. Specifically, if
4534 * the group bucket freezes translation, the actions after the group action
4535 * must continue processing with the original, not the frozen packet! */
4536 ctx->exit = false;
4537
4538 /* A context error in a bucket should not impact processing of other
4539 * buckets or actions. This is similar to cloning a packet for group
4540 * buckets. There is no need to restore the error back to its old value
4541 * because a group action is processed only when there is no previous
4542 * context error.
4543 *
4544 * The exception to the above is errors that are system limits, protecting
4545 * translation from running too long or occupying too much space. These
4546 * errors should not be masked. XLATE_RECURSION_TOO_DEEP,
4547 * XLATE_TOO_MANY_RESUBMITS and XLATE_STACK_TOO_DEEP fall into this category. */
4548 if (ctx->error == XLATE_TOO_MANY_MPLS_LABELS ||
4549 ctx->error == XLATE_UNSUPPORTED_PACKET_TYPE) {
4550 /* reset the error and continue processing other buckets */
4551 ctx->error = XLATE_OK;
4552 }
4553
4554 ctx->xin->trace = old_trace;
4555 }
4556
4557 static struct ofputil_bucket *
4558 pick_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
4559 {
4560 return group_first_live_bucket(ctx, group, 0);
4561 }
4562
4563 static struct ofputil_bucket *
4564 pick_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4565 {
4566 flow_mask_hash_fields(&ctx->xin->flow, ctx->wc,
4567 NX_HASH_FIELDS_SYMMETRIC_L4);
4568 return group_best_live_bucket(ctx, group,
4569 flow_hash_symmetric_l4(&ctx->xin->flow, 0));
4570 }
4571
4572 static struct ofputil_bucket *
4573 pick_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4574 {
4575 const struct field_array *fields = &group->up.props.fields;
4576 const uint8_t *mask_values = fields->values;
4577 uint32_t basis = hash_uint64(group->up.props.selection_method_param);
4578
4579 size_t i;
4580 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
4581 const struct mf_field *mf = mf_from_id(i);
4582
4583 /* Skip fields for which prerequisites are not met. */
4584 if (!mf_are_prereqs_ok(mf, &ctx->xin->flow, ctx->wc)) {
4585 /* Skip the mask bytes for this field. */
4586 mask_values += mf->n_bytes;
4587 continue;
4588 }
4589
4590 union mf_value value;
4591 union mf_value mask;
4592
4593 mf_get_value(mf, &ctx->xin->flow, &value);
4594 /* Mask the value. */
4595 for (int j = 0; j < mf->n_bytes; j++) {
4596 mask.b[j] = *mask_values++;
4597 value.b[j] &= mask.b[j];
4598 }
4599 basis = hash_bytes(&value, mf->n_bytes, basis);
4600
4601 /* For tunnels, hash in whether the field is present. */
4602 if (mf_is_tun_metadata(mf)) {
4603 basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
4604 }
4605
4606 mf_mask_field_masked(mf, &mask, ctx->wc);
4607 }
4608
4609 return group_best_live_bucket(ctx, group, basis);
4610 }
4611
4612 static struct ofputil_bucket *
4613 pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4614 {
4615 uint32_t dp_hash = ctx->xin->flow.dp_hash;
4616
4617 /* dp_hash value 0 is special since it means that the dp_hash has not been
4618 * computed, as all computed dp_hash values are non-zero. Therefore
4619 * compare to zero can be used to decide if the dp_hash value is valid
4620 * without masking the dp_hash field. */
4621 if (!dp_hash) {
4622 enum ovs_hash_alg hash_alg = group->hash_alg;
4623 if (hash_alg > ctx->xbridge->support.max_hash_alg) {
4624 /* Algorithm supported by all datapaths. */
4625 hash_alg = OVS_HASH_ALG_L4;
4626 }
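/* Request recirculation with a hash action so that dp_hash is computed;
 * the select group is re-evaluated once the packet recirculates. */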
4627 ctx_trigger_recirculate_with_hash(ctx, hash_alg, group->hash_basis);
4628 return NULL;
4629 } else {
4630 uint32_t hash_mask = group->hash_mask;
4631 ctx->wc->masks.dp_hash |= hash_mask;
4632
4633 /* Starting from the original masked dp_hash value iterate over the
4634 * hash mapping table to find the first live bucket. As the buckets
4635 * are quasi-randomly spread over the hash values, this maintains
4636 * a distribution according to bucket weights even when some buckets
4637 * are non-live. */
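/* For example, with hash_mask 0x3 and dp_hash 6, the hash-map slots are
 * probed in the order 2, 3, 0, 1 until a live bucket is found. */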
4638 for (int i = 0; i <= hash_mask; i++) {
4639 struct ofputil_bucket *b =
4640 group->hash_map[(dp_hash + i) & hash_mask];
4641 if (bucket_is_alive(ctx, b, 0)) {
4642 return b;
4643 }
4644 }
4645
4646 return NULL;
4647 }
4648 }
4649
4650 static struct ofputil_bucket *
4651 pick_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
4652 {
4653 /* Select groups may access flow keys beyond L2 in order to
4654 * select a bucket. Recirculate as appropriate to make this possible.
4655 */
4656 if (ctx->was_mpls) {
4657 ctx_trigger_freeze(ctx);
4658 return NULL;
4659 }
4660
4661 switch (group->selection_method) {
4662 case SEL_METHOD_DEFAULT:
4663 return pick_default_select_group(ctx, group);
4664 break;
4665 case SEL_METHOD_HASH:
4666 return pick_hash_fields_select_group(ctx, group);
4667 break;
4668 case SEL_METHOD_DP_HASH:
4669 return pick_dp_hash_select_group(ctx, group);
4670 break;
4671 default:
4672 /* Parsing of groups ensures this never happens */
4673 OVS_NOT_REACHED();
4674 }
4675
4676 return NULL;
4677 }
4678
4679 static void
4680 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group,
4681 bool is_last_action)
4682 {
4683 if (group->up.type == OFPGT11_ALL || group->up.type == OFPGT11_INDIRECT) {
4684 struct ovs_list *last_bucket = group->up.buckets.prev;
4685 struct ofputil_bucket *bucket;
4686 LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
4687 bool is_last_bucket = &bucket->list_node == last_bucket;
4688 xlate_group_bucket(ctx, bucket, is_last_action && is_last_bucket);
4689 }
4690 xlate_group_stats(ctx, group, NULL);
4691 } else {
4692 struct ofputil_bucket *bucket;
4693 if (group->up.type == OFPGT11_SELECT) {
4694 bucket = pick_select_group(ctx, group);
4695 } else if (group->up.type == OFPGT11_FF) {
4696 bucket = pick_ff_group(ctx, group);
4697 } else {
4698 OVS_NOT_REACHED();
4699 }
4700
4701 if (bucket) {
4702 xlate_report(ctx, OFT_DETAIL, "using bucket %"PRIu32,
4703 bucket->bucket_id);
4704 xlate_group_bucket(ctx, bucket, is_last_action);
4705 xlate_group_stats(ctx, group, bucket);
4706 } else {
4707 xlate_report(ctx, OFT_DETAIL, "no live bucket");
4708 if (ctx->xin->xcache) {
4709 ofproto_group_unref(&group->up);
4710 }
4711 }
4712 }
4713 }
4714
4715 static bool
4716 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id,
4717 bool is_last_action)
4718 {
4719 if (xlate_resubmit_resource_check(ctx)) {
4720 struct group_dpif *group;
4721
4722 /* Take ref only if xcache exists. */
4723 group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
4724 ctx->xin->tables_version, ctx->xin->xcache);
4725 if (!group) {
4726 /* XXX: Should set ctx->error ? */
4727 xlate_report(ctx, OFT_WARN, "output to nonexistent group %"PRIu32,
4728 group_id);
4729 return true;
4730 }
4731 xlate_group_action__(ctx, group, is_last_action);
4732 }
4733
4734 return false;
4735 }
4736
4737 static void
4738 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
4739 const struct ofpact_resubmit *resubmit,
4740 bool is_last_action)
4741 {
4742 ofp_port_t in_port;
4743 uint8_t table_id;
4744 bool may_packet_in = false;
4745 bool honor_table_miss = false;
4746
4747 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
4748 /* Still allow missed packets to be sent to the controller
4749 * if resubmitting from an internal table. */
4750 may_packet_in = true;
4751 honor_table_miss = true;
4752 }
4753
4754 in_port = resubmit->in_port;
4755 if (in_port == OFPP_IN_PORT) {
4756 in_port = ctx->xin->flow.in_port.ofp_port;
4757 }
4758
4759 table_id = resubmit->table_id;
4760 if (table_id == 255) {
4761 table_id = ctx->table_id;
4762 }
4763
4764 xlate_table_action(ctx, in_port, table_id, may_packet_in,
4765 honor_table_miss, resubmit->with_ct_orig,
4766 is_last_action, do_xlate_actions);
4767 }
4768
4769 static void
4770 flood_packet_to_port(struct xlate_ctx *ctx, const struct xport *xport,
4771 bool all, bool is_last_action)
4772 {
4773 if (!xport) {
4774 return;
4775 }
4776
4777 if (all) {
4778 compose_output_action__(ctx, xport->ofp_port, NULL, false,
4779 is_last_action, false);
4780 } else {
4781 compose_output_action(ctx, xport->ofp_port, NULL, is_last_action,
4782 false);
4783 }
4784 }
4785
4786 static void
4787 flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
4788 {
4789 const struct xport *xport, *last = NULL;
4790
4791 /* Use 'last' to keep track of the last output port. */
4792 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
4793 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
4794 continue;
4795 }
4796
4797 if (all || !(xport->config & OFPUTIL_PC_NO_FLOOD)) {
4798 /* The previously saved 'last' is not the last port after all; send a
4799 * packet out to it now and update 'last'. */
4800 flood_packet_to_port(ctx, last, all, false);
4801 last = xport;
4802 }
4803 }
4804
4805 /* Send the packet to the 'last' port. */
4806 flood_packet_to_port(ctx, last, all, is_last_action);
4807 ctx->nf_output_iface = NF_OUT_FLOOD;
4808 }
4809
4810 static void
4811 put_controller_user_action(struct xlate_ctx *ctx,
4812 bool dont_send, bool continuation,
4813 uint32_t recirc_id, int len,
4814 enum ofp_packet_in_reason reason,
4815 uint16_t controller_id)
4816 {
4817 struct user_action_cookie cookie;
4818
4819 memset(&cookie, 0, sizeof cookie);
4820 cookie.type = USER_ACTION_COOKIE_CONTROLLER;
4821 cookie.ofp_in_port = OFPP_NONE;
4822 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
4823 cookie.controller.dont_send = dont_send;
4824 cookie.controller.continuation = continuation;
4825 cookie.controller.reason = reason;
4826 cookie.controller.recirc_id = recirc_id;
4827 put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
4828 cookie.controller.controller_id = controller_id;
4829 cookie.controller.max_len = len;
4830
4831 odp_port_t odp_port = ofp_port_to_odp_port(ctx->xbridge,
4832 ctx->xin->flow.in_port.ofp_port);
4833 uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port);
4834 odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
4835 false, ctx->odp_actions, NULL);
4836 }
4837
4838 static void
4839 xlate_controller_action(struct xlate_ctx *ctx, int len,
4840 enum ofp_packet_in_reason reason,
4841 uint16_t controller_id,
4842 uint32_t provider_meter_id,
4843 const uint8_t *userdata, size_t userdata_len)
4844 {
4845 xlate_commit_actions(ctx);
4846
4847 /* A packet sent by an action in a table-miss rule is considered an
4848 * explicit table miss. OpenFlow before 1.3 doesn't have that concept so
4849 * it will get translated back to OFPR_ACTION for those versions. */
4850 if (reason == OFPR_ACTION
4851 && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
4852 reason = OFPR_EXPLICIT_MISS;
4853 }
4854
4855 struct frozen_state state = {
4856 .table_id = ctx->table_id,
4857 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4858 .stack = ctx->stack.data,
4859 .stack_size = ctx->stack.size,
4860 .mirrors = ctx->mirrors,
4861 .conntracked = ctx->conntracked,
4862 .was_mpls = ctx->was_mpls,
4863 .ofpacts = NULL,
4864 .ofpacts_len = 0,
4865 .action_set = NULL,
4866 .action_set_len = 0,
4867 .userdata = CONST_CAST(uint8_t *, userdata),
4868 .userdata_len = userdata_len,
4869 };
4870 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4871
4872 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4873 if (!recirc_id) {
4874 xlate_report_error(ctx, "Failed to allocate recirculation id");
4875 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4876 return;
4877 }
4878 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4879
4880 /* If the controller action didn't request a specific meter (i.e. the
4881 * 'meter_id' argument was NX_CTLR_NO_METER), see if one was
4882 * configured through the "controller" virtual meter.
4883 *
4884 * Internally, ovs-vswitchd uses UINT32_MAX to indicate no meter is
4885 * configured. */
4886 uint32_t meter_id;
4887 if (provider_meter_id == UINT32_MAX) {
4888 meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
4889 } else {
4890 meter_id = provider_meter_id;
4891 }
4892
4893 size_t offset;
4894 size_t ac_offset;
4895 if (meter_id != UINT32_MAX) {
4896 /* If controller meter is configured, generate clone(meter, userspace)
4897 * action. */
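/* (A sample action with 100% probability is used below; it effectively
 * clones the packet for the nested meter and userspace actions.) */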
4898 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
4899 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
4900 UINT32_MAX);
4901 ac_offset = nl_msg_start_nested(ctx->odp_actions,
4902 OVS_SAMPLE_ATTR_ACTIONS);
4903 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
4904 }
4905
4906 /* Generate the datapath flows even if we don't send the packet-in
4907 * so that debugging more closely represents normal state. */
4908 bool dont_send = false;
4909 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4910 dont_send = true;
4911 }
4912 put_controller_user_action(ctx, dont_send, false, recirc_id, len,
4913 reason, controller_id);
4914
4915 if (meter_id != UINT32_MAX) {
4916 nl_msg_end_nested(ctx->odp_actions, ac_offset);
4917 nl_msg_end_nested(ctx->odp_actions, offset);
4918 }
4919 }
4920
4921 /* Creates a frozen state, and allocates a unique recirc id for the given
4922 * state. Returns a non-zero recirc id if it is allocated successfully.
4923 * Returns 0 otherwise.
4924 **/
4925 static uint32_t
4926 finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
4927 {
4928 ovs_assert(ctx->freezing);
4929
4930 struct frozen_state state = {
4931 .table_id = table,
4932 .ofproto_uuid = ctx->xbridge->ofproto->uuid,
4933 .stack = ctx->stack.data,
4934 .stack_size = ctx->stack.size,
4935 .mirrors = ctx->mirrors,
4936 .conntracked = ctx->conntracked,
4937 .was_mpls = ctx->was_mpls,
4938 .xport_uuid = ctx->xin->xport_uuid,
4939 .ofpacts = ctx->frozen_actions.data,
4940 .ofpacts_len = ctx->frozen_actions.size,
4941 .action_set = ctx->action_set.data,
4942 .action_set_len = ctx->action_set.size,
4943 .userdata = ctx->pause ? CONST_CAST(uint8_t *,ctx->pause->userdata)
4944 : NULL,
4945 .userdata_len = ctx->pause ? ctx->pause->userdata_len : 0,
4946 };
4947 frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
4948
4949 /* Allocate a unique recirc id for the given metadata state in the
4950 * flow. An existing id, with a new reference to the corresponding
4951 * recirculation context, will be returned if possible.
4952 * The life-cycle of this recirc id is managed by associating it
4953 * with the udpif key ('ukey') created for each new datapath flow. */
4954 uint32_t recirc_id = recirc_alloc_id_ctx(&state);
4955 if (!recirc_id) {
4956 xlate_report_error(ctx, "Failed to allocate recirculation id");
4957 ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
4958 return 0;
4959 }
4960 recirc_refs_add(&ctx->xout->recircs, recirc_id);
4961
4962 if (ctx->pause) {
4963 if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
4964 return 0;
4965 }
4966
4967 put_controller_user_action(ctx, false, true, recirc_id,
4968 ctx->pause->max_len,
4969 ctx->pause->reason,
4970 ctx->pause->controller_id);
4971 } else {
4972 if (ctx->recirc_update_dp_hash) {
4973 struct ovs_action_hash *act_hash;
4974
4975 /* Hash action. */
4976 act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
4977 OVS_ACTION_ATTR_HASH,
4978 sizeof *act_hash);
4979 act_hash->hash_alg = ctx->dp_hash_alg;
4980 act_hash->hash_basis = ctx->dp_hash_basis;
4981 }
4982 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
4983 }
4984
4985 /* Undo changes done by freezing. */
4986 ctx_cancel_freeze(ctx);
4987 return recirc_id;
4988 }
4989
4990 /* Called only when we're freezing. */
4991 static void
4992 finish_freezing(struct xlate_ctx *ctx)
4993 {
4994 xlate_commit_actions(ctx);
4995 finish_freezing__(ctx, 0);
4996 }
4997
4998 /* Fork the pipeline here. The current packet will continue processing the
4999 * current action list. A clone of the current packet will recirculate, skip
5000 * the remainder of the current action list and asynchronously resume pipeline
5001 * processing in 'table' with the current metadata and action set. */
5002 static void
5003 compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table,
5004 const uint16_t zone)
5005 {
5006 uint32_t recirc_id;
5007 ctx->freezing = true;
5008 recirc_id = finish_freezing__(ctx, table);
5009
5010 if (OVS_UNLIKELY(ctx->xin->trace) && recirc_id) {
5011 if (oftrace_add_recirc_node(ctx->xin->recirc_queue,
5012 OFT_RECIRC_CONNTRACK, &ctx->xin->flow,
5013 ctx->ct_nat_action, ctx->xin->packet,
5014 recirc_id, zone)) {
5015 xlate_report(ctx, OFT_DETAIL, "A clone of the packet is forked to "
5016 "recirculate. The forked pipeline will be resumed at "
5017 "table %u.", table);
5018 } else {
5019 xlate_report(ctx, OFT_DETAIL, "Failed to trace the conntrack "
5020 "forked pipeline with recirc_id = %d.", recirc_id);
5021 }
5022 }
5023 }
5024
5025 static void
5026 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
5027 {
5028 struct flow *flow = &ctx->xin->flow;
5029 int n;
5030
5031 ovs_assert(eth_type_mpls(mpls->ethertype));
5032
5033 n = flow_count_mpls_labels(flow, ctx->wc);
5034 if (!n) {
5035 xlate_commit_actions(ctx);
5036 } else if (n >= FLOW_MAX_MPLS_LABELS) {
5037 if (ctx->xin->packet != NULL) {
5038 xlate_report_error(ctx, "dropping packet on which an MPLS push "
5039 "action can't be performed as it would have "
5040 "more MPLS LSEs than the %d supported.",
5041 FLOW_MAX_MPLS_LABELS);
5042 }
5043 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
5044 return;
5045 }
5046
5047 /* Update flow's MPLS stack, and clear L3/4 fields to mark them invalid. */
5048 flow_push_mpls(flow, n, mpls->ethertype, ctx->wc, true);
5049 }
5050
5051 static void
5052 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
5053 {
5054 struct flow *flow = &ctx->xin->flow;
5055 int n = flow_count_mpls_labels(flow, ctx->wc);
5056
5057 if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
5058 if (!eth_type_mpls(eth_type) && ctx->xbridge->support.odp.recirc) {
5059 ctx->was_mpls = true;
5060 }
5061 } else if (n >= FLOW_MAX_MPLS_LABELS) {
5062 if (ctx->xin->packet != NULL) {
5063 xlate_report_error(ctx, "dropping packet on which an "
5064 "MPLS pop action can't be performed as it has "
5065 "more MPLS LSEs than the %d supported.",
5066 FLOW_MAX_MPLS_LABELS);
5067 }
5068 ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
5069 ofpbuf_clear(ctx->odp_actions);
5070 }
5071 }
5072
5073 static bool
5074 compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
5075 {
5076 struct flow *flow = &ctx->xin->flow;
5077
5078 if (!is_ip_any(flow)) {
5079 return false;
5080 }
5081
5082 ctx->wc->masks.nw_ttl = 0xff;
5083 if (flow->nw_ttl > 1) {
5084 flow->nw_ttl--;
5085 return false;
5086 } else {
5087 size_t i;
5088
5089 for (i = 0; i < ids->n_controllers; i++) {
5090 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
5091 ids->cnt_ids[i], UINT32_MAX, NULL, 0);
5092 }
5093
5094 /* Stop processing for current table. */
5095 xlate_report(ctx, OFT_WARN, "IPv%d decrement TTL exception",
5096 flow->dl_type == htons(ETH_TYPE_IP) ? 4 : 6);
5097 return true;
5098 }
5099 }
5100
5101 static void
5102 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
5103 {
5104 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5105 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
5106 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
5107 }
5108 }
5109
5110 static void
5111 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
5112 {
5113 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5114 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
5115 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
5116 }
5117 }
5118
5119 static bool
5120 compose_dec_nsh_ttl_action(struct xlate_ctx *ctx)
5121 {
5122 struct flow *flow = &ctx->xin->flow;
5123
5124 if ((flow->packet_type == htonl(PT_NSH)) ||
5125 (flow->dl_type == htons(ETH_TYPE_NSH))) {
5126 ctx->wc->masks.nsh.ttl = 0xff;
5127 if (flow->nsh.ttl > 1) {
5128 flow->nsh.ttl--;
5129 return false;
5130 } else {
5131 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
5132 0, UINT32_MAX, NULL, 0);
5133 }
5134 }
5135
5136 /* Stop processing for current table. */
5137 xlate_report(ctx, OFT_WARN, "NSH decrement TTL exception");
5138 return true;
5139 }
5140
5141 static void
5142 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
5143 {
5144 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
5145 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
5146 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
5147 }
5148 }
5149
5150 static bool
5151 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
5152 {
5153 struct flow *flow = &ctx->xin->flow;
5154
5155 if (eth_type_mpls(flow->dl_type)) {
5156 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
5157
5158 ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
5159 if (ttl > 1) {
5160 ttl--;
5161 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
5162 return false;
5163 } else {
5164 xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
5165 UINT32_MAX, NULL, 0);
5166 }
5167 }
5168
5169 /* Stop processing for current table. */
5170 xlate_report(ctx, OFT_WARN, "MPLS decrement TTL exception");
5171 return true;
5172 }
5173
5174 static void
5175 xlate_delete_field(struct xlate_ctx *ctx,
5176 struct flow *flow,
5177 const struct ofpact_delete_field *odf)
5178 {
5179 struct ds s = DS_EMPTY_INITIALIZER;
5180
5181 /* Currently, only tun_metadata is allowed for delete_field action. */
5182 tun_metadata_delete(&flow->tunnel, odf->field);
5183
5184 ds_put_format(&s, "delete %s", odf->field->name);
5185 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5186 ds_destroy(&s);
5187 }
5188
5189 /* Emits an action that outputs to 'port', within 'ctx'.
5190 *
5191 * 'controller_len' affects only packets sent to an OpenFlow controller. It
5192 * is the maximum number of bytes of the packet to send. UINT16_MAX means to
5193 * send the whole packet (and 0 means to omit the packet entirely).
5194 *
5195 * 'may_packet_in' determines whether the packet may be sent to an OpenFlow
5196 * controller. If it is false, then the packet is never sent to the OpenFlow
5197 * controller.
5198 *
5199 * 'is_last_action' should be true if this output is the last OpenFlow action
5200 * to be processed, which enables certain optimizations.
5201 *
5202 * 'truncate' should be true if the packet to be output is being truncated,
5203 * which suppresses certain optimizations. */
5204 static void
5205 xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
5206 uint16_t controller_len, bool may_packet_in,
5207 bool is_last_action, bool truncate,
5208 bool group_bucket_action)
5209 {
5210 ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
5211
5212 ctx->nf_output_iface = NF_OUT_DROP;
5213
5214 switch (port) {
5215 case OFPP_IN_PORT:
5216 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL,
5217 is_last_action, truncate);
5218 break;
5219 case OFPP_TABLE:
5220 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
5221 0, may_packet_in, true, false, false,
5222 do_xlate_actions);
5223 break;
5224 case OFPP_NORMAL:
5225 xlate_normal(ctx);
5226 break;
5227 case OFPP_FLOOD:
5228 flood_packets(ctx, false, is_last_action);
5229 break;
5230 case OFPP_ALL:
5231 flood_packets(ctx, true, is_last_action);
5232 break;
5233 case OFPP_CONTROLLER:
5234 xlate_controller_action(ctx, controller_len,
5235 (ctx->in_packet_out ? OFPR_PACKET_OUT
5236 : group_bucket_action ? OFPR_GROUP
5237 : ctx->in_action_set ? OFPR_ACTION_SET
5238 : OFPR_ACTION),
5239 0, UINT32_MAX, NULL, 0);
5240 break;
5241 case OFPP_NONE:
5242 break;
5243 case OFPP_LOCAL:
5244 default:
5245 if (port != ctx->xin->flow.in_port.ofp_port) {
5246 compose_output_action(ctx, port, NULL, is_last_action, truncate);
5247 } else {
5248 xlate_report_info(ctx, "skipping output to input port");
5249 }
5250 break;
5251 }
5252
5253 if (prev_nf_output_iface == NF_OUT_FLOOD) {
5254 ctx->nf_output_iface = NF_OUT_FLOOD;
5255 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
5256 ctx->nf_output_iface = prev_nf_output_iface;
5257 } else if (prev_nf_output_iface != NF_OUT_DROP &&
5258 ctx->nf_output_iface != NF_OUT_FLOOD) {
5259 ctx->nf_output_iface = NF_OUT_MULTI;
5260 }
5261 }
5262
5263 static void
5264 xlate_output_reg_action(struct xlate_ctx *ctx,
5265 const struct ofpact_output_reg *or,
5266 bool is_last_action,
5267 bool group_bucket_action)
5268 {
5269 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
5270 if (port <= UINT16_MAX) {
5271 xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
5272
5273 union mf_subvalue value;
5274
5275 memset(&value, 0xff, sizeof value);
5276 mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
5277 xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
5278 false, is_last_action, false,
5279 group_bucket_action);
5280 } else {
5281 xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
5282 port);
5283 }
5284 }
5285
5286 static void
5287 xlate_output_trunc_action(struct xlate_ctx *ctx,
5288 ofp_port_t port, uint32_t max_len,
5289 bool is_last_action,
5290 bool group_bucket_action)
5291 {
5292 bool support_trunc = ctx->xbridge->support.trunc;
5293 struct ovs_action_trunc *trunc;
5294 char name[OFP_MAX_PORT_NAME_LEN];
5295
5296 switch (port) {
5297 case OFPP_TABLE:
5298 case OFPP_NORMAL:
5299 case OFPP_FLOOD:
5300 case OFPP_ALL:
5301 case OFPP_CONTROLLER:
5302 case OFPP_NONE:
5303 ofputil_port_to_string(port, NULL, name, sizeof name);
5304 xlate_report(ctx, OFT_WARN,
5305 "output_trunc does not support port: %s", name);
5306 break;
5307 case OFPP_LOCAL:
5308 case OFPP_IN_PORT:
5309 default:
5310 if (port != ctx->xin->flow.in_port.ofp_port) {
5311 const struct xport *xport = get_ofp_port(ctx->xbridge, port);
5312
5313 if (xport == NULL || xport->odp_port == ODPP_NONE) {
5314 /* Since truncation happens at the following output action, if
5315 * the output port is a patch port, the behavior is somewhat
5316 * unpredictable. For simplicity, disallow this case. */
5317 ofputil_port_to_string(port, NULL, name, sizeof name);
5318 xlate_report_error(ctx, "output_trunc does not support "
5319 "patch port %s", name);
5320 break;
5321 }
5322
5323 trunc = nl_msg_put_unspec_uninit(ctx->odp_actions,
5324 OVS_ACTION_ATTR_TRUNC,
5325 sizeof *trunc);
5326 trunc->max_len = max_len;
5327 xlate_output_action(ctx, port, 0, false, is_last_action, true,
5328 group_bucket_action);
5329 if (!support_trunc) {
5330 ctx->xout->slow |= SLOW_ACTION;
5331 }
5332 } else {
5333 xlate_report_info(ctx, "skipping output to input port");
5334 }
5335 break;
5336 }
5337 }
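/* Hedged example (the odp port numbering is hypothetical): an OpenFlow
 * output(port=2,max_len=100) on a bridge whose OpenFlow port 2 maps to
 * datapath port 2 composes, per the code above, roughly
 *     trunc(100),2
 * and additionally marks the flow SLOW_ACTION if the datapath lacks
 * native truncate support. */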
5338
5339 static void
5340 xlate_enqueue_action(struct xlate_ctx *ctx,
5341 const struct ofpact_enqueue *enqueue,
5342 bool is_last_action,
5343 bool group_bucket_action)
5344 {
5345 ofp_port_t ofp_port = enqueue->port;
5346 uint32_t queue_id = enqueue->queue;
5347 uint32_t flow_priority, priority;
5348 int error;
5349
5350 /* Translate queue to priority. */
5351 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
5352 if (error) {
5353 /* Fall back to ordinary output action. */
5354 xlate_output_action(ctx, enqueue->port, 0, false,
5355 is_last_action, false,
5356 group_bucket_action);
5357 return;
5358 }
5359
5360 /* Check output port. */
5361 if (ofp_port == OFPP_IN_PORT) {
5362 ofp_port = ctx->xin->flow.in_port.ofp_port;
5363 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
5364 return;
5365 }
5366
5367 /* Add datapath actions. */
5368 flow_priority = ctx->xin->flow.skb_priority;
5369 ctx->xin->flow.skb_priority = priority;
5370 compose_output_action(ctx, ofp_port, NULL, is_last_action, false);
5371 ctx->xin->flow.skb_priority = flow_priority;
5372
5373 /* Update NetFlow output port. */
5374 if (ctx->nf_output_iface == NF_OUT_DROP) {
5375 ctx->nf_output_iface = ofp_port;
5376 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
5377 ctx->nf_output_iface = NF_OUT_MULTI;
5378 }
5379 }
5380
5381 static void
5382 xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
5383 {
5384 uint32_t skb_priority;
5385
5386 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
5387 ctx->xin->flow.skb_priority = skb_priority;
5388 } else {
5389 /* Couldn't translate queue to a priority. Nothing to do. A warning
5390 * has already been logged. */
5391 }
5392 }
5393
5394 static bool
5395 member_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
5396 {
5397 const struct xbridge *xbridge = xbridge_;
5398 struct xport *port;
5399
5400 switch (ofp_port) {
5401 case OFPP_IN_PORT:
5402 case OFPP_TABLE:
5403 case OFPP_NORMAL:
5404 case OFPP_FLOOD:
5405 case OFPP_ALL:
5406 case OFPP_NONE:
5407 return true;
5408 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5409 return false;
5410 default:
5411 port = get_ofp_port(xbridge, ofp_port);
5412 return port ? port->may_enable : false;
5413 }
5414 }
5415
5416 static void
5417 xlate_bundle_action(struct xlate_ctx *ctx,
5418 const struct ofpact_bundle *bundle,
5419 bool is_last_action,
5420 bool group_bucket_action)
5421 {
5422 ofp_port_t port;
5423
5424 port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, member_enabled_cb,
5425 CONST_CAST(struct xbridge *, ctx->xbridge));
5426 if (bundle->dst.field) {
5427 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
5428 xlate_report_subfield(ctx, &bundle->dst);
5429 } else {
5430 xlate_output_action(ctx, port, 0, false, is_last_action, false,
5431 group_bucket_action);
5432 }
5433 }
5434
5435 static void
5436 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
5437 {
5438 learn_mask(learn, ctx->wc);
5439
5440 if (ctx->xin->xcache || ctx->xin->allow_side_effects) {
5441 uint64_t ofpacts_stub[1024 / 8];
5442 struct ofputil_flow_mod fm;
5443 struct ofproto_flow_mod ofm__, *ofm;
5444 struct ofpbuf ofpacts;
5445 enum ofperr error;
5446
5447 if (ctx->xin->xcache) {
5448 ofm = xmalloc(sizeof *ofm);
5449 } else {
5450 ofm = &ofm__;
5451 }
5452
5453 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
5454 learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
5455 if (OVS_UNLIKELY(ctx->xin->trace)) {
5456 struct ds s = DS_EMPTY_INITIALIZER;
5457 ds_put_format(&s, "table=%"PRIu8" ", fm.table_id);
5458 minimatch_format(&fm.match,
5459 ofproto_get_tun_tab(&ctx->xin->ofproto->up),
5460 NULL, &s, OFP_DEFAULT_PRIORITY);
5461 ds_chomp(&s, ' ');
5462 ds_put_format(&s, " priority=%d", fm.priority);
5463 if (fm.new_cookie) {
5464 ds_put_format(&s, " cookie=%#"PRIx64, ntohll(fm.new_cookie));
5465 }
5466 if (fm.idle_timeout != OFP_FLOW_PERMANENT) {
5467 ds_put_format(&s, " idle=%"PRIu16, fm.idle_timeout);
5468 }
5469 if (fm.hard_timeout != OFP_FLOW_PERMANENT) {
5470 ds_put_format(&s, " hard=%"PRIu16, fm.hard_timeout);
5471 }
5472 if (fm.flags & NX_LEARN_F_SEND_FLOW_REM) {
5473 ds_put_cstr(&s, " send_flow_rem");
5474 }
5475 ds_put_cstr(&s, " actions=");
5476 struct ofpact_format_params fp = { .s = &s };
5477 ofpacts_format(fm.ofpacts, fm.ofpacts_len, &fp);
5478 xlate_report(ctx, OFT_DETAIL, "%s", ds_cstr(&s));
5479 ds_destroy(&s);
5480 }
5481 error = ofproto_dpif_flow_mod_init_for_learn(ctx->xbridge->ofproto,
5482 &fm, ofm);
5483 ofpbuf_uninit(&ofpacts);
5484
5485 if (!error) {
5486 bool success = true;
5487 if (ctx->xin->allow_side_effects) {
5488 error = ofproto_flow_mod_learn(ofm, ctx->xin->xcache != NULL,
5489 learn->limit, &success);
5490 } else if (learn->limit) {
5491 if (!ofm->temp_rule
5492 || ofm->temp_rule->state != RULE_INSERTED) {
5493 /* The learned rule expired and there are no packets, so
5494 * we cannot learn again. Since the translated actions
5495 * depend on the result of learning, we tell the caller
5496 * that there's no point in caching this result. */
5497 ctx->xout->avoid_caching = true;
5498 }
5499 }
5500
5501 if (learn->flags & NX_LEARN_F_WRITE_RESULT) {
5502 nxm_reg_load(&learn->result_dst, success ? 1 : 0,
5503 &ctx->xin->flow, ctx->wc);
5504 xlate_report_subfield(ctx, &learn->result_dst);
5505 }
5506
5507 if (success && ctx->xin->xcache) {
5508 struct xc_entry *entry;
5509
5510 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
5511 entry->learn.ofm = ofm;
5512 entry->learn.limit = learn->limit;
5513 ofm = NULL;
5514 } else {
5515 ofproto_flow_mod_uninit(ofm);
5516 }
5517
5518 if (OVS_UNLIKELY(ctx->xin->trace && !success)) {
5519 xlate_report(ctx, OFT_DETAIL, "Limit exceeded, learn failed");
5520 }
5521 }
5522
5523 if (ofm != &ofm__) {
5524 free(ofm);
5525 }
5526
5527 if (error) {
5528 xlate_report_error(ctx, "LEARN action execution failed (%s).",
5529 ofperr_to_string(error));
5530 }
5531
5532 minimatch_destroy(&fm.match);
5533 } else {
5534 xlate_report(ctx, OFT_WARN,
5535 "suppressing side effects, so learn action ignored");
5536 }
5537 }
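/* For reference, the trace line assembled above for a learn action has the
 * general shape (optional fields appear only when applicable):
 *     table=<id> <match> priority=<prio> [cookie=<hex>] [idle=<secs>]
 *     [hard=<secs>] [send_flow_rem] actions=<actions>
 * This is only the tracing string; the actual flow_mod is built by
 * learn_execute() and applied via ofproto_flow_mod_learn(). */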
5538
5539 static void
5540 xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
5541 uint16_t idle_timeout, uint16_t hard_timeout)
5542 {
5543 if (tcp_flags & (TCP_FIN | TCP_RST)) {
5544 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
5545 }
5546 }
5547
5548 static void
5549 xlate_fin_timeout(struct xlate_ctx *ctx,
5550 const struct ofpact_fin_timeout *oft)
5551 {
5552 if (ctx->rule) {
5553 if (ctx->xin->allow_side_effects) {
5554 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
5555 oft->fin_idle_timeout, oft->fin_hard_timeout);
5556 }
5557 if (ctx->xin->xcache) {
5558 struct xc_entry *entry;
5559
5560 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
5561 /* XC_RULE already holds a reference on the rule, none is taken
5562 * here. */
5563 entry->fin.rule = ctx->rule;
5564 entry->fin.idle = oft->fin_idle_timeout;
5565 entry->fin.hard = oft->fin_hard_timeout;
5566 }
5567 }
5568 }
5569
5570 static void
5571 xlate_sample_action(struct xlate_ctx *ctx,
5572 const struct ofpact_sample *os)
5573 {
5574 odp_port_t output_odp_port = ODPP_NONE;
5575 odp_port_t tunnel_out_port = ODPP_NONE;
5576 struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
5577 bool emit_set_tunnel = false;
5578
5579 if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
5580 return;
5581 }
5582
5583 /* Scale the probability from 16-bit to 32-bit while representing
5584 * the same percentage. */
5585 uint32_t probability = (os->probability << 16) | os->probability;
5586
5587 /* If the sampling port in the flow sample action is equal to the
5588 * input port, this sample action is an input port action. */
5589 if (os->sampling_port != OFPP_NONE &&
5590 os->sampling_port != ctx->xin->flow.in_port.ofp_port) {
5591 output_odp_port = ofp_port_to_odp_port(ctx->xbridge,
5592 os->sampling_port);
5593 if (output_odp_port == ODPP_NONE) {
5594 xlate_report_error(ctx, "can't use unknown port %d in flow sample "
5595 "action", os->sampling_port);
5596 return;
5597 }
5598
5599 if (dpif_ipfix_get_flow_exporter_tunnel_sampling(ipfix,
5600 os->collector_set_id)
5601 && dpif_ipfix_is_tunnel_port(ipfix, output_odp_port)) {
5602 tunnel_out_port = output_odp_port;
5603 emit_set_tunnel = true;
5604 }
5605 }
5606
5607 xlate_commit_actions(ctx);
5608 /* If 'emit_set_tunnel' is set, e.g. sample(sampling_port=1) translates
5609 * into the datapath actions set(tunnel(...)), sample(...), which are
5610 * used for sampling egress tunnel information. */
5611 if (emit_set_tunnel) {
5612 const struct xport *xport = get_ofp_port(ctx->xbridge,
5613 os->sampling_port);
5614
5615 if (xport && xport->is_tunnel) {
5616 struct flow *flow = &ctx->xin->flow;
5617 tnl_port_send(xport->ofport, flow, ctx->wc);
5618 if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
5619 struct flow_tnl flow_tnl = flow->tunnel;
5620 const char *tnl_type;
5621
5622 tnl_type = tnl_port_get_type(xport->ofport);
5623 commit_odp_tunnel_action(flow, &ctx->base_flow,
5624 ctx->odp_actions, tnl_type);
5625 flow->tunnel = flow_tnl;
5626 }
5627 } else {
5628 xlate_report_error(ctx,
5629 "sampling_port:%d should be a tunnel port.",
5630 os->sampling_port);
5631 }
5632 }
5633
5634 struct user_action_cookie cookie;
5635
5636 memset(&cookie, 0, sizeof cookie);
5637 cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
5638 cookie.ofp_in_port = ctx->xin->flow.in_port.ofp_port;
5639 cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
5640 cookie.flow_sample.probability = os->probability;
5641 cookie.flow_sample.collector_set_id = os->collector_set_id;
5642 cookie.flow_sample.obs_domain_id = os->obs_domain_id;
5643 cookie.flow_sample.obs_point_id = os->obs_point_id;
5644 cookie.flow_sample.output_odp_port = output_odp_port;
5645 cookie.flow_sample.direction = os->direction;
5646
5647 compose_sample_action(ctx, probability, &cookie, tunnel_out_port, false);
5648 }
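/* Rough sketch of the result (the exact rendering depends on odp-util and
 * the datapath): a flow sample with probability 0x8000 becomes
 * approximately
 *     sample(sample=50.0%,actions(userspace(pid=...,flow_sample(...))))
 * with a set(tunnel(...)) action emitted just before it when egress tunnel
 * sampling applies, as handled above. */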
5649
5650 /* Determines whether a datapath action translated from an OpenFlow action
5651 * can be reversed by another datapath action.
5652 *
5653 * OpenFlow actions that do not emit datapath actions are trivially
5654 * reversible. Reversibility of other actions depends on the nature of
5655 * the action and its translation. */
5656 static bool
5657 reversible_actions(const struct ofpact *ofpacts, size_t ofpacts_len)
5658 {
5659 const struct ofpact *a;
5660
5661 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5662 switch (a->type) {
5663 case OFPACT_BUNDLE:
5664 case OFPACT_CLEAR_ACTIONS:
5665 case OFPACT_CLONE:
5666 case OFPACT_CONJUNCTION:
5667 case OFPACT_CONTROLLER:
5668 case OFPACT_CT_CLEAR:
5669 case OFPACT_DEBUG_RECIRC:
5670 case OFPACT_DEBUG_SLOW:
5671 case OFPACT_DEC_MPLS_TTL:
5672 case OFPACT_DEC_TTL:
5673 case OFPACT_ENQUEUE:
5674 case OFPACT_EXIT:
5675 case OFPACT_FIN_TIMEOUT:
5676 case OFPACT_GOTO_TABLE:
5677 case OFPACT_GROUP:
5678 case OFPACT_LEARN:
5679 case OFPACT_MULTIPATH:
5680 case OFPACT_NOTE:
5681 case OFPACT_OUTPUT:
5682 case OFPACT_OUTPUT_REG:
5683 case OFPACT_POP_MPLS:
5684 case OFPACT_POP_QUEUE:
5685 case OFPACT_PUSH_MPLS:
5686 case OFPACT_PUSH_VLAN:
5687 case OFPACT_REG_MOVE:
5688 case OFPACT_RESUBMIT:
5689 case OFPACT_SAMPLE:
5690 case OFPACT_SET_ETH_DST:
5691 case OFPACT_SET_ETH_SRC:
5692 case OFPACT_SET_FIELD:
5693 case OFPACT_SET_IP_DSCP:
5694 case OFPACT_SET_IP_ECN:
5695 case OFPACT_SET_IP_TTL:
5696 case OFPACT_SET_IPV4_DST:
5697 case OFPACT_SET_IPV4_SRC:
5698 case OFPACT_SET_L4_DST_PORT:
5699 case OFPACT_SET_L4_SRC_PORT:
5700 case OFPACT_SET_MPLS_LABEL:
5701 case OFPACT_SET_MPLS_TC:
5702 case OFPACT_SET_MPLS_TTL:
5703 case OFPACT_SET_QUEUE:
5704 case OFPACT_SET_TUNNEL:
5705 case OFPACT_SET_VLAN_PCP:
5706 case OFPACT_SET_VLAN_VID:
5707 case OFPACT_STACK_POP:
5708 case OFPACT_STACK_PUSH:
5709 case OFPACT_STRIP_VLAN:
5710 case OFPACT_UNROLL_XLATE:
5711 case OFPACT_WRITE_ACTIONS:
5712 case OFPACT_WRITE_METADATA:
5713 case OFPACT_CHECK_PKT_LARGER:
5714 case OFPACT_DELETE_FIELD:
5715 break;
5716
5717 case OFPACT_CT:
5718 case OFPACT_METER:
5719 case OFPACT_NAT:
5720 case OFPACT_OUTPUT_TRUNC:
5721 case OFPACT_ENCAP:
5722 case OFPACT_DECAP:
5723 case OFPACT_DEC_NSH_TTL:
5724 return false;
5725 }
5726 }
5727 return true;
5728 }
5729
5730 static void
5731 clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
5732 struct xlate_ctx *ctx, bool is_last_action,
5733 bool group_bucket_action OVS_UNUSED)
5734 {
5735 struct ofpbuf old_stack = ctx->stack;
5736 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
5737 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
5738 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
5739
5740 struct ofpbuf old_action_set = ctx->action_set;
5741 uint64_t actset_stub[1024 / 8];
5742 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
5743 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
5744
5745 size_t offset, ac_offset;
5746 struct flow old_flow = ctx->xin->flow;
5747
5748 if (reversible_actions(actions, actions_len) || is_last_action) {
5749 old_flow = ctx->xin->flow;
5750 do_xlate_actions(actions, actions_len, ctx, is_last_action, false);
5751 if (!ctx->freezing) {
5752 xlate_action_set(ctx);
5753 }
5754 if (ctx->freezing) {
5755 finish_freezing(ctx);
5756 }
5757 goto xlate_done;
5758 }
5759
5760 /* Commit datapath actions before emitting the clone action to
5761 * avoid emitting those actions twice: once inside the clone and
5762 * once more for the actions that follow the clone. */
5763 xlate_commit_actions(ctx);
5764 struct flow old_base = ctx->base_flow;
5765 bool old_was_mpls = ctx->was_mpls;
5766 bool old_conntracked = ctx->conntracked;
5767
5768 /* The actions are not reversible; a datapath clone action is
5769 * required to encode the translation. Select the clone action
5770 * based on datapath capabilities. */
5771 if (ctx->xbridge->support.clone) {
5772 /* Use the clone action as the datapath clone. */
5773 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CLONE);
5774 do_xlate_actions(actions, actions_len, ctx, true, false);
5775 if (!ctx->freezing) {
5776 xlate_action_set(ctx);
5777 }
5778 if (ctx->freezing) {
5779 finish_freezing(ctx);
5780 }
5781 nl_msg_end_non_empty_nested(ctx->odp_actions, offset);
5782 goto dp_clone_done;
5783 }
5784
5785 if (ctx->xbridge->support.sample_nesting > 3) {
5786 /* Use sample action as datapath clone. */
5787 offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
5788 ac_offset = nl_msg_start_nested(ctx->odp_actions,
5789 OVS_SAMPLE_ATTR_ACTIONS);
5790 do_xlate_actions(actions, actions_len, ctx, true, false);
5791 if (!ctx->freezing) {
5792 xlate_action_set(ctx);
5793 }
5794 if (ctx->freezing) {
5795 finish_freezing(ctx);
5796 }
5797 if (nl_msg_end_non_empty_nested(ctx->odp_actions, ac_offset)) {
5798 nl_msg_cancel_nested(ctx->odp_actions, offset);
5799 } else {
5800 nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
5801 UINT32_MAX); /* 100% probability. */
5802 nl_msg_end_nested(ctx->odp_actions, offset);
5803 }
5804 goto dp_clone_done;
5805 }
5806
5807 /* The datapath does not support clone; skip translating these
5808 * actions and report an error. */
5809 xlate_report_error(ctx, "Failed to compose clone action");
5810
5811 dp_clone_done:
5812 /* The clone's conntrack execution should have no effect on the original
5813 * packet. */
5814 ctx->conntracked = old_conntracked;
5815
5816 /* Popping MPLS from the clone should have no effect on the original
5817 * packet. */
5818 ctx->was_mpls = old_was_mpls;
5819
5820 /* Restore the 'base_flow' for the next action. */
5821 ctx->base_flow = old_base;
5822
5823 xlate_done:
5824 ofpbuf_uninit(&ctx->action_set);
5825 ctx->action_set = old_action_set;
5826 ofpbuf_uninit(&ctx->stack);
5827 ctx->stack = old_stack;
5828 ctx->xin->flow = old_flow;
5829 }
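/* Summary sketch of the three paths above (action bodies are illustrative):
 *   - reversible or last action: the nested actions are inlined directly;
 *   - datapath clone support:    clone(<nested datapath actions>);
 *   - sample-based fallback:     sample(sample=100.0%,actions(<nested>)).
 * If none of these apply, translation reports an error and emits nothing
 * for the nested actions. */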
5830
5831 static void
5832 compose_clone(struct xlate_ctx *ctx, const struct ofpact_nest *oc,
5833 bool is_last_action)
5834 {
5835 size_t oc_actions_len = ofpact_nest_get_action_len(oc);
5836
5837 clone_xlate_actions(oc->actions, oc_actions_len, ctx, is_last_action,
5838 false);
5839 }
5840
5841 static void
5842 xlate_meter_action(struct xlate_ctx *ctx, const struct ofpact_meter *meter)
5843 {
5844 if (meter->provider_meter_id != UINT32_MAX) {
5845 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER,
5846 meter->provider_meter_id);
5847 }
5848 }
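/* Hedged example: an OpenFlow "meter:5" whose provider meter id resolved to
 * 3 emits a datapath action rendered roughly as meter(3); meters without a
 * provider id (UINT32_MAX) emit nothing here. */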
5849
5850 static bool
5851 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
5852 {
5853 if (xport->config & (is_stp(&ctx->xin->flow)
5854 ? OFPUTIL_PC_NO_RECV_STP
5855 : OFPUTIL_PC_NO_RECV)) {
5856 return false;
5857 }
5858
5859 /* Only drop packets here if both forwarding and learning are
5860 * disabled. If just learning is enabled, we need to let
5861 * OFPP_NORMAL and the learning action have a look at the packet
5862 * before we can drop it. */
5863 if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
5864 (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
5865 return false;
5866 }
5867
5868 return true;
5869 }
5870
5871 static void
5872 xlate_write_actions__(struct xlate_ctx *ctx,
5873 const struct ofpact *ofpacts, size_t ofpacts_len)
5874 {
5875 /* Maintain actset_output depending on the contents of the action set:
5876 *
5877 * - OFPP_UNSET, if there is no "output" action.
5878 *
5879 * - The output port, if there is an "output" action and no "group"
5880 * action.
5881 *
5882 * - OFPP_UNSET, if there is a "group" action.
5883 */
5884 if (!ctx->action_set_has_group) {
5885 const struct ofpact *a;
5886 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
5887 if (a->type == OFPACT_OUTPUT) {
5888 ctx->xin->flow.actset_output = ofpact_get_OUTPUT(a)->port;
5889 } else if (a->type == OFPACT_GROUP) {
5890 ctx->xin->flow.actset_output = OFPP_UNSET;
5891 ctx->action_set_has_group = true;
5892 break;
5893 }
5894 }
5895 }
5896
5897 ofpbuf_put(&ctx->action_set, ofpacts, ofpacts_len);
5898 }
5899
5900 static void
5901 xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact_nest *a)
5902 {
5903 xlate_write_actions__(ctx, a->actions, ofpact_nest_get_action_len(a));
5904 }
5905
5906 static void
5907 xlate_action_set(struct xlate_ctx *ctx)
5908 {
5909 uint64_t action_list_stub[1024 / 8];
5910 struct ofpbuf action_list = OFPBUF_STUB_INITIALIZER(action_list_stub);
5911 ofpacts_execute_action_set(&action_list, &ctx->action_set);
5912 /* Clear the action set, as it is not needed any more. */
5913 ofpbuf_clear(&ctx->action_set);
5914 if (action_list.size) {
5915 ctx->in_action_set = true;
5916
5917 struct ovs_list *old_trace = ctx->xin->trace;
5918 ctx->xin->trace = xlate_report(ctx, OFT_TABLE,
5919 "--. Executing action set:");
5920 do_xlate_actions(action_list.data, action_list.size, ctx, true, false);
5921 ctx->xin->trace = old_trace;
5922
5923 ctx->in_action_set = false;
5924 }
5925 ofpbuf_uninit(&action_list);
5926 }
5927
5928 static void
5929 freeze_put_unroll_xlate(struct xlate_ctx *ctx)
5930 {
5931 struct ofpact_unroll_xlate *unroll = ctx->frozen_actions.header;
5932
5933 /* Restore the table_id and rule cookie for a potential PACKET
5934 * IN if needed. */
5935 if (!unroll ||
5936 (ctx->table_id != unroll->rule_table_id
5937 || ctx->rule_cookie != unroll->rule_cookie)) {
5938 unroll = ofpact_put_UNROLL_XLATE(&ctx->frozen_actions);
5939 unroll->rule_table_id = ctx->table_id;
5940 unroll->rule_cookie = ctx->rule_cookie;
5941 ctx->frozen_actions.header = unroll;
5942 }
5943 }
5944
5945
5946 /* Copy actions 'a' through 'end' to ctx->frozen_actions, which will be
5947 * executed after thawing. Inserts an UNROLL_XLATE action, if none is already
5948 * present, before any action that may depend on the current table ID or flow
5949 * cookie. */
5950 static void
5951 freeze_unroll_actions(const struct ofpact *a, const struct ofpact *end,
5952 struct xlate_ctx *ctx)
5953 {
5954 for (; a < end; a = ofpact_next(a)) {
5955 switch (a->type) {
5956 case OFPACT_OUTPUT_REG:
5957 case OFPACT_OUTPUT_TRUNC:
5958 case OFPACT_GROUP:
5959 case OFPACT_OUTPUT:
5960 case OFPACT_CONTROLLER:
5961 case OFPACT_DEC_MPLS_TTL:
5962 case OFPACT_DEC_NSH_TTL:
5963 case OFPACT_DEC_TTL:
5964 /* These actions may generate asynchronous messages, which include
5965 * table ID and flow cookie information. */
5966 freeze_put_unroll_xlate(ctx);
5967 break;
5968
5969 case OFPACT_RESUBMIT:
5970 if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
5971 /* This resubmit action is relative to the current table, so we
5972 * need to track what table that is. */
5973 freeze_put_unroll_xlate(ctx);
5974 }
5975 break;
5976
5977 case OFPACT_SET_TUNNEL:
5978 case OFPACT_REG_MOVE:
5979 case OFPACT_SET_FIELD:
5980 case OFPACT_STACK_PUSH:
5981 case OFPACT_STACK_POP:
5982 case OFPACT_LEARN:
5983 case OFPACT_WRITE_METADATA:
5984 case OFPACT_GOTO_TABLE:
5985 case OFPACT_ENQUEUE:
5986 case OFPACT_SET_VLAN_VID:
5987 case OFPACT_SET_VLAN_PCP:
5988 case OFPACT_STRIP_VLAN:
5989 case OFPACT_PUSH_VLAN:
5990 case OFPACT_SET_ETH_SRC:
5991 case OFPACT_SET_ETH_DST:
5992 case OFPACT_SET_IPV4_SRC:
5993 case OFPACT_SET_IPV4_DST:
5994 case OFPACT_SET_IP_DSCP:
5995 case OFPACT_SET_IP_ECN:
5996 case OFPACT_SET_IP_TTL:
5997 case OFPACT_SET_L4_SRC_PORT:
5998 case OFPACT_SET_L4_DST_PORT:
5999 case OFPACT_SET_QUEUE:
6000 case OFPACT_POP_QUEUE:
6001 case OFPACT_PUSH_MPLS:
6002 case OFPACT_POP_MPLS:
6003 case OFPACT_SET_MPLS_LABEL:
6004 case OFPACT_SET_MPLS_TC:
6005 case OFPACT_SET_MPLS_TTL:
6006 case OFPACT_MULTIPATH:
6007 case OFPACT_BUNDLE:
6008 case OFPACT_EXIT:
6009 case OFPACT_UNROLL_XLATE:
6010 case OFPACT_FIN_TIMEOUT:
6011 case OFPACT_CLEAR_ACTIONS:
6012 case OFPACT_WRITE_ACTIONS:
6013 case OFPACT_METER:
6014 case OFPACT_SAMPLE:
6015 case OFPACT_CLONE:
6016 case OFPACT_ENCAP:
6017 case OFPACT_DECAP:
6018 case OFPACT_DEBUG_RECIRC:
6019 case OFPACT_DEBUG_SLOW:
6020 case OFPACT_CT:
6021 case OFPACT_CT_CLEAR:
6022 case OFPACT_NAT:
6023 case OFPACT_CHECK_PKT_LARGER:
6024 case OFPACT_DELETE_FIELD:
6025 /* These may not generate PACKET INs. */
6026 break;
6027
6028 case OFPACT_NOTE:
6029 case OFPACT_CONJUNCTION:
6030 /* These need not be copied for restoration. */
6031 continue;
6032 }
6033 /* Copy the action over. */
6034 ofpbuf_put(&ctx->frozen_actions, a, OFPACT_ALIGN(a->len));
6035 }
6036 }
6037
6038 static void
6039 put_ct_mark(const struct flow *flow, struct ofpbuf *odp_actions,
6040 struct flow_wildcards *wc)
6041 {
6042 if (wc->masks.ct_mark) {
6043 struct {
6044 uint32_t key;
6045 uint32_t mask;
6046 } *odp_ct_mark;
6047
6048 odp_ct_mark = nl_msg_put_unspec_uninit(odp_actions, OVS_CT_ATTR_MARK,
6049 sizeof(*odp_ct_mark));
6050 odp_ct_mark->key = flow->ct_mark & wc->masks.ct_mark;
6051 odp_ct_mark->mask = wc->masks.ct_mark;
6052 }
6053 }
6054
6055 static void
6056 put_ct_label(const struct flow *flow, struct ofpbuf *odp_actions,
6057 struct flow_wildcards *wc)
6058 {
6059 if (!ovs_u128_is_zero(wc->masks.ct_label)) {
6060 struct {
6061 ovs_u128 key;
6062 ovs_u128 mask;
6063 } odp_ct_label;
6064
6065 odp_ct_label.key = ovs_u128_and(flow->ct_label, wc->masks.ct_label);
6066 odp_ct_label.mask = wc->masks.ct_label;
6067 nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_LABELS,
6068 &odp_ct_label, sizeof odp_ct_label);
6069 }
6070 }
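/* Illustrative rendering (values are hypothetical): when the nested ct
 * actions set ct_mark and ct_label, the enclosing ct action typically
 * shows up in datapath flows as, e.g.,
 *     ct(commit,zone=5,mark=0x1/0xffffffff,label=0x2/0xff)
 * with the key/mask pairs taken from the flow and the accumulated
 * wildcards, as composed by put_ct_mark() and put_ct_label() above. */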
6071
6072 static void
6073 put_drop_action(struct ofpbuf *odp_actions, enum xlate_error error)
6074 {
6075 nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_DROP, error);
6076 }
6077
6078 static void
6079 put_ct_helper(struct xlate_ctx *ctx,
6080 struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
6081 {
6082 if (ofc->alg) {
6083 switch(ofc->alg) {
6084 case IPPORT_FTP:
6085 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
6086 break;
6087 case IPPORT_TFTP:
6088 nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "tftp");
6089 break;
6090 default:
6091 xlate_report_error(ctx, "cannot serialize ct_helper %d", ofc->alg);
6092 break;
6093 }
6094 }
6095 }
6096
6097 static void
6098 put_ct_timeout(struct ofpbuf *odp_actions, const struct dpif_backer *backer,
6099 const struct flow *flow, struct flow_wildcards *wc,
6100 uint16_t zone_id)
6101 {
6102 bool unwildcard;
6103 char *tp_name = NULL;
6104
6105 if (ofproto_dpif_ct_zone_timeout_policy_get_name(backer, zone_id,
6106 ntohs(flow->dl_type), flow->nw_proto, &tp_name, &unwildcard)) {
6107 nl_msg_put_string(odp_actions, OVS_CT_ATTR_TIMEOUT, tp_name);
6108
6109 if (unwildcard) {
6110 /* The underlying datapath requires separate timeout
6111 * policies for different Ethertypes and IP protocols. We
6112 * don't need to unwildcard 'wc->masks.dl_type' since that
6113 * field is always unwildcarded in megaflows. */
6114 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6115 }
6116 }
6117 free(tp_name);
6118 }
6119
6120 static void
6121 put_ct_nat(struct xlate_ctx *ctx)
6122 {
6123 struct ofpact_nat *ofn = ctx->ct_nat_action;
6124 size_t nat_offset;
6125
6126 if (!ofn) {
6127 return;
6128 }
6129
6130 nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
6131 if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
6132 nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
6133 ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
6134 if (ofn->flags & NX_NAT_F_PERSISTENT) {
6135 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
6136 }
6137 if (ofn->flags & NX_NAT_F_PROTO_HASH) {
6138 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
6139 } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
6140 nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
6141 }
6142 if (ofn->range_af == AF_INET) {
6143 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
6144 ofn->range.addr.ipv4.min);
6145 if (ofn->range.addr.ipv4.max &&
6146 (ntohl(ofn->range.addr.ipv4.max)
6147 > ntohl(ofn->range.addr.ipv4.min))) {
6148 nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6149 ofn->range.addr.ipv4.max);
6150 }
6151 } else if (ofn->range_af == AF_INET6) {
6152 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
6153 &ofn->range.addr.ipv6.min,
6154 sizeof ofn->range.addr.ipv6.min);
6155 if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
6156 memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
6157 sizeof ofn->range.addr.ipv6.max) > 0) {
6158 nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
6159 &ofn->range.addr.ipv6.max,
6160 sizeof ofn->range.addr.ipv6.max);
6161 }
6162 }
6163 if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
6164 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
6165 ofn->range.proto.min);
6166 if (ofn->range.proto.max &&
6167 ofn->range.proto.max > ofn->range.proto.min) {
6168 nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
6169 ofn->range.proto.max);
6170 }
6171 }
6172 }
6173 nl_msg_end_nested(ctx->odp_actions, nat_offset);
6174 }
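/* Hedged example (addresses and ports are placeholders): an ofpact_nat with
 * NX_NAT_F_SRC and an IPv4 range 10.0.0.1-10.0.0.254, ports 1024-65535,
 * nests attributes that render roughly as
 *     nat(src=10.0.0.1-10.0.0.254:1024-65535)
 * inside the enclosing ct() action. */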
6175
6176 static void
6177 compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
6178 bool is_last_action)
6179 {
6180 ovs_u128 old_ct_label_mask = ctx->wc->masks.ct_label;
6181 uint32_t old_ct_mark_mask = ctx->wc->masks.ct_mark;
6182 size_t ct_offset;
6183 uint16_t zone;
6184
6185 /* Ensure that any prior actions are applied before composing the new
6186 * conntrack action. */
6187 xlate_commit_actions(ctx);
6188
6189 /* Process nested actions first, to populate the key. */
6190 ctx->ct_nat_action = NULL;
6191 ctx->wc->masks.ct_mark = 0;
6192 ctx->wc->masks.ct_label = OVS_U128_ZERO;
6193 do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx,
6194 is_last_action, false);
6195
6196 if (ofc->zone_src.field) {
6197 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
6198 } else {
6199 zone = ofc->zone_imm;
6200 }
6201
6202 ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
6203 if (ofc->flags & NX_CT_F_COMMIT) {
6204 nl_msg_put_flag(ctx->odp_actions, ofc->flags & NX_CT_F_FORCE ?
6205 OVS_CT_ATTR_FORCE_COMMIT : OVS_CT_ATTR_COMMIT);
6206 if (ctx->xbridge->support.ct_eventmask) {
6207 nl_msg_put_u32(ctx->odp_actions, OVS_CT_ATTR_EVENTMASK,
6208 OVS_CT_EVENTMASK_DEFAULT);
6209 }
6210 if (ctx->xbridge->support.ct_timeout) {
6211 put_ct_timeout(ctx->odp_actions, ctx->xbridge->ofproto->backer,
6212 &ctx->xin->flow, ctx->wc, zone);
6213 }
6214 }
6215 nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
6216 put_ct_mark(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
6217 put_ct_label(&ctx->xin->flow, ctx->odp_actions, ctx->wc);
6218 put_ct_helper(ctx, ctx->odp_actions, ofc);
6219 put_ct_nat(ctx);
6220 nl_msg_end_nested(ctx->odp_actions, ct_offset);
6221
6222 ctx->wc->masks.ct_mark = old_ct_mark_mask;
6223 ctx->wc->masks.ct_label = old_ct_label_mask;
6224
6225 if (ofc->recirc_table != NX_CT_RECIRC_NONE) {
6226 ctx->conntracked = true;
6227 compose_recirculate_and_fork(ctx, ofc->recirc_table, zone);
6228 }
6229
6230 ctx->ct_nat_action = NULL;
6231
6232 /* The ct_* fields are only available in the scope of the 'recirc_table'
6233 * call chain. */
6234 flow_clear_conntrack(&ctx->xin->flow);
6235 xlate_report(ctx, OFT_DETAIL, "Sets the packet to an untracked state, "
6236 "and clears all the conntrack fields.");
6237 ctx->conntracked = false;
6238 }
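/* End-to-end sketch (hedged; the recirc id is allocated at runtime): an
 * OpenFlow action such as
 *     ct(commit,zone=5,exec(set_field:1->ct_mark),table=1)
 * composes roughly into
 *     ct(commit,zone=5,mark=0x1/0xffffffff),recirc(0x1)
 * with post-recirculation processing resuming at table 1 as described in
 * compose_recirculate_and_fork(). */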
6239
6240 static void
6241 compose_ct_clear_action(struct xlate_ctx *ctx)
6242 {
6243 clear_conntrack(ctx);
6244 /* This action originally existed without dpif support. So to preserve
6245 * compatibility, only append it if the dpif supports it. */
6246 if (ctx->xbridge->support.ct_clear) {
6247 nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_CT_CLEAR);
6248 }
6249 }
6250
6251 /* The check_pkt_larger action checks the packet length and stores the
6252 * result in the destination register bit. We translate this action to
6253 * the datapath action 'check_pkt_len', whose format is
6254 * 'check_pkt_len(pkt_len, ge(actions), le(actions))'.
6255 *
6256 * We first set the destination register bit to 1 and call
6257 * 'do_xlate_actions' for the case where the packet length is greater
6258 * than the specified packet length.
6259 *
6260 * We then set the destination register bit to 0 and call
6261 * 'do_xlate_actions' for the case where the packet length is less than
6262 * or equal to the specified packet length.
6263 *
6264 * It is possible for freezing to happen in both cases.
6265 */
6266 static void
6267 xlate_check_pkt_larger(struct xlate_ctx *ctx,
6268 struct ofpact_check_pkt_larger *check_pkt_larger,
6269 const struct ofpact *remaining_acts,
6270 size_t remaining_acts_len)
6271 {
6272 union mf_subvalue value;
6273 memset(&value, 0, sizeof value);
6274 if (!ctx->xbridge->support.check_pkt_len) {
6275 uint8_t is_pkt_larger = 0;
6276 if (ctx->xin->packet) {
6277 is_pkt_larger =
6278 dp_packet_size(ctx->xin->packet) > check_pkt_larger->pkt_len;
6279 }
6280 value.u8_val = is_pkt_larger;
6281 mf_write_subfield_flow(&check_pkt_larger->dst, &value,
6282 &ctx->xin->flow);
6283 /* If the datapath doesn't support the check_pkt_len action, then set
6284 * the SLOW_ACTION flag. If we don't set SLOW_ACTION, we
6285 * will push a flow to the datapath based on the packet length
6286 * in ctx->xin->packet. For subsequent packets which match the
6287 * same flow, the datapath will apply the actions without considering
6288 * the packet length. This results in wrong actions being applied.
6289 */
6290 ctx->xout->slow |= SLOW_ACTION;
6291 return;
6292 }
6293
6294 struct ofpbuf old_stack = ctx->stack;
6295 union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
6296 ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
6297 ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
6298
6299 struct ofpbuf old_action_set = ctx->action_set;
6300 uint64_t actset_stub[1024 / 8];
6301 ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
6302 ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
6303
6304 struct flow old_flow = ctx->xin->flow;
6305 xlate_commit_actions(ctx);
6306 struct flow old_base = ctx->base_flow;
6307 bool old_was_mpls = ctx->was_mpls;
6308 bool old_conntracked = ctx->conntracked;
6309
6310 size_t offset = nl_msg_start_nested(ctx->odp_actions,
6311 OVS_ACTION_ATTR_CHECK_PKT_LEN);
6312 nl_msg_put_u16(ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN,
6313 check_pkt_larger->pkt_len);
6314 size_t offset_attr = nl_msg_start_nested(
6315 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
6316 value.u8_val = 1;
6317 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6318 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6319 if (!ctx->freezing) {
6320 xlate_action_set(ctx);
6321 }
6322 if (ctx->freezing) {
6323 finish_freezing(ctx);
6324 }
6325 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6326
6327 ctx->base_flow = old_base;
6328 ctx->was_mpls = old_was_mpls;
6329 ctx->conntracked = old_conntracked;
6330 ctx->xin->flow = old_flow;
6331
6332 /* If the flow translation for the IF_GREATER case required freezing,
6333 * then ctx->exit would be true. Reset it to false so that we can
6334 * do the flow translation for the IF_LESS_EQUAL case. finish_freezing()
6335 * has already taken care of undoing the changes done for the freeze. */
6336 ctx->exit = false;
6337
6338 offset_attr = nl_msg_start_nested(
6339 ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
6340 value.u8_val = 0;
6341 mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
6342 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
6343 if (!ctx->freezing) {
6344 xlate_action_set(ctx);
6345 }
6346 if (ctx->freezing) {
6347 finish_freezing(ctx);
6348 }
6349 nl_msg_end_nested(ctx->odp_actions, offset_attr);
6350 nl_msg_end_nested(ctx->odp_actions, offset);
6351
6352 ofpbuf_uninit(&ctx->action_set);
6353 ctx->action_set = old_action_set;
6354 ofpbuf_uninit(&ctx->stack);
6355 ctx->stack = old_stack;
6356 ctx->base_flow = old_base;
6357 ctx->was_mpls = old_was_mpls;
6358 ctx->conntracked = old_conntracked;
6359 ctx->xin->flow = old_flow;
6360 ctx->exit = true;
6361 }
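/* Hedged example of the composed action (the size is a placeholder): for
 * check_pkt_larger(200)->reg0[0] followed by further OpenFlow actions, the
 * datapath action renders roughly as
 *     check_pkt_len(size=200,gt(<actions>),le(<actions>))
 * where each branch holds the remaining actions translated once with the
 * destination bit set to 1 and once with it set to 0. Datapaths without
 * native support fall back to the SLOW_ACTION path at the top of this
 * function. */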
6362
6363 static void
6364 rewrite_flow_encap_ethernet(struct xlate_ctx *ctx,
6365 struct flow *flow,
6366 struct flow_wildcards *wc)
6367 {
6368 wc->masks.packet_type = OVS_BE32_MAX;
6369 if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
6370 /* Only adjust the packet_type and zero the dummy Ethernet addresses. */
6371 ovs_be16 ethertype = pt_ns_type_be(flow->packet_type);
6372 flow->packet_type = htonl(PT_ETH);
6373 flow->dl_src = eth_addr_zero;
6374 flow->dl_dst = eth_addr_zero;
6375 flow->dl_type = ethertype;
6376 } else {
6377 /* Error handling: drop packet. */
6378 xlate_report_debug(ctx, OFT_ACTION,
6379 "Dropping packet as encap(ethernet) is not "
6380 "supported for packet type ethernet.");
6381 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6382 }
6383 }
6384
6385 /* For an MD2 NSH header returns a pointer to an ofpbuf with the encoded
6386 * MD2 TLVs provided as encap properties to the encap operation. This
6387 * will be stored as encap_data in the ctx and copied into the push_nsh
6388 * action at the next commit. */
6389 static struct ofpbuf *
6390 rewrite_flow_push_nsh(struct xlate_ctx *ctx,
6391 const struct ofpact_encap *encap,
6392 struct flow *flow,
6393 struct flow_wildcards *wc)
6394 {
6395 ovs_be32 packet_type = flow->packet_type;
6396 const char *ptr = (char *) encap->props;
6397 struct ofpbuf *buf = ofpbuf_new(NSH_CTX_HDRS_MAX_LEN);
6398 uint8_t md_type = NSH_M_TYPE1;
6399 uint8_t np = 0;
6400 int i;
6401
6402 /* Scan the optional NSH encap TLV properties, if any. */
6403 for (i = 0; i < encap->n_props; i++) {
6404 struct ofpact_ed_prop *prop_ptr =
6405 ALIGNED_CAST(struct ofpact_ed_prop *, ptr);
6406 if (prop_ptr->prop_class == OFPPPC_NSH) {
6407 switch (prop_ptr->type) {
6408 case OFPPPT_PROP_NSH_MDTYPE: {
6409 struct ofpact_ed_prop_nsh_md_type *prop_md_type =
6410 ALIGNED_CAST(struct ofpact_ed_prop_nsh_md_type *,
6411 prop_ptr);
6412 md_type = prop_md_type->md_type;
6413 break;
6414 }
6415 case OFPPPT_PROP_NSH_TLV: {
6416 struct ofpact_ed_prop_nsh_tlv *tlv_prop =
6417 ALIGNED_CAST(struct ofpact_ed_prop_nsh_tlv *,
6418 prop_ptr);
6419 struct nsh_md2_tlv *md2_ctx =
6420 ofpbuf_put_uninit(buf, sizeof(*md2_ctx));
6421 md2_ctx->md_class = tlv_prop->tlv_class;
6422 md2_ctx->type = tlv_prop->tlv_type;
6423 md2_ctx->length = tlv_prop->tlv_len;
6424 size_t len = ROUND_UP(md2_ctx->length, 4);
6425 size_t padding = len - md2_ctx->length;
6426 ofpbuf_put(buf, tlv_prop->data, md2_ctx->length);
6427 ofpbuf_put_zeros(buf, padding);
6428 break;
6429 }
6430 default:
6431 /* No other NSH encap properties defined yet. */
6432 break;
6433 }
6434 }
6435 ptr += ROUND_UP(prop_ptr->len, 8);
6436 }
6437 if (buf->size == 0 || buf->size > NSH_CTX_HDRS_MAX_LEN) {
6438 ofpbuf_delete(buf);
6439 buf = NULL;
6440 }
6441
6442 /* Determine the Next Protocol field for NSH header. */
6443 switch (ntohl(packet_type)) {
6444 case PT_ETH:
6445 np = NSH_P_ETHERNET;
6446 break;
6447 case PT_IPV4:
6448 np = NSH_P_IPV4;
6449 break;
6450 case PT_IPV6:
6451 np = NSH_P_IPV6;
6452 break;
6453 case PT_NSH:
6454 np = NSH_P_NSH;
6455 break;
6456 default:
6457 /* Error handling: drop packet. */
6458 xlate_report_debug(ctx, OFT_ACTION,
6459 "Dropping packet as encap(nsh) is not "
6460 "supported for packet type (%d,0x%x)",
6461 pt_ns(packet_type), pt_ns_type(packet_type));
6462 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6463 return buf;
6464 }
6465 /* Note that we have matched on packet_type! */
6466 wc->masks.packet_type = OVS_BE32_MAX;
6467
6468 /* Reset all current flow packet headers. */
6469 memset(&flow->dl_dst, 0,
6470 sizeof(struct flow) - offsetof(struct flow, dl_dst));
6471
6472 /* Populate the flow with the new NSH header. */
6473 flow->packet_type = htonl(PT_NSH);
6474 flow->dl_type = htons(ETH_TYPE_NSH);
6475 flow->nsh.flags = 0;
6476 flow->nsh.ttl = 63;
6477 flow->nsh.np = np;
6478 flow->nsh.path_hdr = htonl(255);
6479
6480 if (md_type == NSH_M_TYPE1) {
6481 flow->nsh.mdtype = NSH_M_TYPE1;
6482 memset(flow->nsh.context, 0, sizeof flow->nsh.context);
6483 if (buf) {
6484 /* Drop any MD2 context TLVs. */
6485 ofpbuf_delete(buf);
6486 buf = NULL;
6487 }
6488 } else if (md_type == NSH_M_TYPE2) {
6489 flow->nsh.mdtype = NSH_M_TYPE2;
6490 }
6491 flow->nsh.mdtype &= NSH_MDTYPE_MASK;
6492
6493 return buf;
6494 }
6495
6496 static void
6497 xlate_generic_encap_action(struct xlate_ctx *ctx,
6498 const struct ofpact_encap *encap)
6499 {
6500 struct flow *flow = &ctx->xin->flow;
6501 struct flow_wildcards *wc = ctx->wc;
6502 struct ofpbuf *encap_data = NULL;
6503
6504 /* Ensure that any pending actions on the inner packet are applied before
6505 * rewriting the flow */
6506 xlate_commit_actions(ctx);
6507
6508 /* Rewrite the flow to reflect the effect of pushing the new encap header. */
6509 switch (ntohl(encap->new_pkt_type)) {
6510 case PT_ETH:
6511 rewrite_flow_encap_ethernet(ctx, flow, wc);
6512 break;
6513 case PT_NSH:
6514 encap_data = rewrite_flow_push_nsh(ctx, encap, flow, wc);
6515 break;
6516 default:
6517 /* New packet type was checked during decoding. */
6518 OVS_NOT_REACHED();
6519 }
6520
6521 if (!ctx->error) {
6522 /* The actual encap datapath action will be generated at next commit. */
6523 ctx->pending_encap = true;
6524 ctx->encap_data = encap_data;
6525 }
6526 }
6527
6528 /* Returns true if packet must be recirculated after decapsulation. */
6529 static bool
6530 xlate_generic_decap_action(struct xlate_ctx *ctx,
6531 const struct ofpact_decap *decap OVS_UNUSED)
6532 {
6533 struct flow *flow = &ctx->xin->flow;
6534
6535 /* Ensure that any pending actions on the current packet are applied
6536 * before generating the decap action. */
6537 xlate_commit_actions(ctx);
6538
6539 /* We assume for now that the new_pkt_type is PT_USE_NEXT_PROTO. */
6540 switch (ntohl(flow->packet_type)) {
6541 case PT_ETH:
6542 if (flow->vlans[0].tci & htons(VLAN_CFI)) {
6543 /* Error handling: drop packet. */
6544 xlate_report_debug(ctx, OFT_ACTION, "Dropping packet, cannot "
6545 "decap Ethernet if VLAN is present.");
6546 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6547 } else {
6548 /* Just change the packet_type.
6549 * Delay generating pop_eth to the next commit. */
6550 flow->packet_type = htonl(PACKET_TYPE(OFPHTN_ETHERTYPE,
6551 ntohs(flow->dl_type)));
6552 ctx->wc->masks.dl_type = OVS_BE16_MAX;
6553 }
6554 return false;
6555 case PT_NSH:
6556 /* The pop_nsh action is generated at the commit executed as
6557 * part of freezing the ctx for recirculation. Here we just set
6558 * the new packet type based on the NSH next protocol field. */
6559 switch (flow->nsh.np) {
6560 case NSH_P_ETHERNET:
6561 flow->packet_type = htonl(PT_ETH);
6562 break;
6563 case NSH_P_IPV4:
6564 flow->packet_type = htonl(PT_IPV4);
6565 break;
6566 case NSH_P_IPV6:
6567 flow->packet_type = htonl(PT_IPV6);
6568 break;
6569 case NSH_P_NSH:
6570 flow->packet_type = htonl(PT_NSH);
6571 break;
6572 default:
6573 /* Error handling: drop packet. */
6574 xlate_report_debug(ctx, OFT_ACTION,
6575 "Dropping packet as NSH next protocol %d "
6576 "is not supported", flow->nsh.np);
6577 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6578 return false;
6579 break;
6580 }
6581 ctx->wc->masks.nsh.np = UINT8_MAX;
6582 ctx->pending_decap = true;
6583 /* Trigger recirculation. */
6584 return true;
6585 default:
6586 /* Error handling: drop packet. */
6587 xlate_report_debug(
6588 ctx, OFT_ACTION,
6589 "Dropping packet as the decap() does not support "
6590 "packet type (%d,0x%x)",
6591 pt_ns(flow->packet_type), pt_ns_type(flow->packet_type));
6592 ctx->error = XLATE_UNSUPPORTED_PACKET_TYPE;
6593 return false;
6594 }
6595 }
6596
6597 static void
6598 recirc_for_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
6599 {
6600 /* No need to recirculate if already exiting. */
6601 if (ctx->exit) {
6602 return;
6603 }
6604
6605 /* Do not consider recirculating unless the packet was previously MPLS. */
6606 if (!ctx->was_mpls) {
6607 return;
6608 }
6609
6610 /* Special case these actions, only recirculating if necessary.
6611 * This avoids the overhead of recirculation in common use-cases.
6612 */
6613 switch (a->type) {
6614
6615 /* Output actions do not require recirculation. */
6616 case OFPACT_OUTPUT:
6617 case OFPACT_OUTPUT_TRUNC:
6618 case OFPACT_ENQUEUE:
6619 case OFPACT_OUTPUT_REG:
6620 /* Set actions that don't touch L3+ fields do not require recirculation. */
6621 case OFPACT_SET_VLAN_VID:
6622 case OFPACT_SET_VLAN_PCP:
6623 case OFPACT_SET_ETH_SRC:
6624 case OFPACT_SET_ETH_DST:
6625 case OFPACT_SET_TUNNEL:
6626 case OFPACT_SET_QUEUE:
6627 /* If the actions of a group require recirculation, that will be
6628 * detected when translating them. */
6629 case OFPACT_GROUP:
6630 return;
6631
6632 /* Set-field actions that don't touch L3+ fields don't require recirculation. */
6633 case OFPACT_SET_FIELD:
6634 if (mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field)) {
6635 break;
6636 }
6637 return;
6638
6639 /* For simplicity, recirculate in all other cases. */
6640 case OFPACT_CONTROLLER:
6641 case OFPACT_BUNDLE:
6642 case OFPACT_STRIP_VLAN:
6643 case OFPACT_PUSH_VLAN:
6644 case OFPACT_SET_IPV4_SRC:
6645 case OFPACT_SET_IPV4_DST:
6646 case OFPACT_SET_IP_DSCP:
6647 case OFPACT_SET_IP_ECN:
6648 case OFPACT_SET_IP_TTL:
6649 case OFPACT_SET_L4_SRC_PORT:
6650 case OFPACT_SET_L4_DST_PORT:
6651 case OFPACT_REG_MOVE:
6652 case OFPACT_STACK_PUSH:
6653 case OFPACT_STACK_POP:
6654 case OFPACT_DEC_TTL:
6655 case OFPACT_SET_MPLS_LABEL:
6656 case OFPACT_SET_MPLS_TC:
6657 case OFPACT_SET_MPLS_TTL:
6658 case OFPACT_DEC_MPLS_TTL:
6659 case OFPACT_PUSH_MPLS:
6660 case OFPACT_POP_MPLS:
6661 case OFPACT_POP_QUEUE:
6662 case OFPACT_FIN_TIMEOUT:
6663 case OFPACT_RESUBMIT:
6664 case OFPACT_LEARN:
6665 case OFPACT_CONJUNCTION:
6666 case OFPACT_MULTIPATH:
6667 case OFPACT_NOTE:
6668 case OFPACT_EXIT:
6669 case OFPACT_SAMPLE:
6670 case OFPACT_CLONE:
6671 case OFPACT_ENCAP:
6672 case OFPACT_DECAP:
6673 case OFPACT_DEC_NSH_TTL:
6674 case OFPACT_UNROLL_XLATE:
6675 case OFPACT_CT:
6676 case OFPACT_CT_CLEAR:
6677 case OFPACT_NAT:
6678 case OFPACT_DEBUG_RECIRC:
6679 case OFPACT_DEBUG_SLOW:
6680 case OFPACT_METER:
6681 case OFPACT_CLEAR_ACTIONS:
6682 case OFPACT_WRITE_ACTIONS:
6683 case OFPACT_WRITE_METADATA:
6684 case OFPACT_GOTO_TABLE:
6685 case OFPACT_CHECK_PKT_LARGER:
6686 case OFPACT_DELETE_FIELD:
6687 default:
6688 break;
6689 }
6690
6691 /* Recirculate */
6692 ctx_trigger_freeze(ctx);
6693 }
6694
6695 static void
6696 xlate_ofpact_reg_move(struct xlate_ctx *ctx, const struct ofpact_reg_move *a)
6697 {
6698 mf_subfield_copy(&a->src, &a->dst, &ctx->xin->flow, ctx->wc);
6699 xlate_report_subfield(ctx, &a->dst);
6700 }
6701
6702 static void
6703 xlate_ofpact_stack_pop(struct xlate_ctx *ctx, const struct ofpact_stack *a)
6704 {
6705 if (nxm_execute_stack_pop(a, &ctx->xin->flow, ctx->wc, &ctx->stack)) {
6706 xlate_report_subfield(ctx, &a->subfield);
6707 } else {
6708 xlate_report_error(ctx, "stack underflow");
6709 }
6710 }
6711
6712 /* Restore translation context data that was stored earlier. */
6713 static void
6714 xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
6715 const struct ofpact_unroll_xlate *a)
6716 {
6717 ctx->table_id = a->rule_table_id;
6718 ctx->rule_cookie = a->rule_cookie;
6719 xlate_report(ctx, OFT_THAW, "restored state: table=%"PRIu8", "
6720 "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
6721 }
6722
6723 static void
6724 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
6725 struct xlate_ctx *ctx, bool is_last_action,
6726 bool group_bucket_action)
6727 {
6728 struct flow_wildcards *wc = ctx->wc;
6729 struct flow *flow = &ctx->xin->flow;
6730 const struct ofpact *a;
6731
6732 /* dl_type already in the mask, not set below. */
6733
6734 if (!ofpacts_len) {
6735 xlate_report(ctx, OFT_ACTION, "drop");
6736 return;
6737 }
6738
6739 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6740 struct ofpact_controller *controller;
6741 const struct ofpact_metadata *metadata;
6742 const struct ofpact_set_field *set_field;
6743 const struct mf_field *mf;
6744 bool last = is_last_action && ofpact_last(a, ofpacts, ofpacts_len)
6745 && ctx->action_set.size;
6746
6747 if (ctx->error) {
6748 break;
6749 }
6750
6751 recirc_for_mpls(a, ctx);
6752
6753 if (ctx->exit) {
6754 /* Check if need to store the remaining actions for later
6755 * execution. */
6756 if (ctx->freezing) {
6757 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
6758 ctx);
6759 }
6760 break;
6761 }
6762
6763 if (OVS_UNLIKELY(ctx->xin->trace)) {
6764 struct ds s = DS_EMPTY_INITIALIZER;
6765 struct ofpact_format_params fp = { .s = &s };
6766 ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
6767 xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
6768 ds_destroy(&s);
6769 }
6770
6771 switch (a->type) {
6772 case OFPACT_OUTPUT:
6773 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
6774 ofpact_get_OUTPUT(a)->max_len, true, last,
6775 false, group_bucket_action);
6776 break;
6777
6778 case OFPACT_GROUP:
6779 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id, last)) {
6780 /* Group could not be found. */
6781
6782 /* XXX: Terminates action list translation, but does not
6783 * terminate the pipeline. */
6784 return;
6785 }
6786 break;
6787
6788 case OFPACT_CONTROLLER:
6789 controller = ofpact_get_CONTROLLER(a);
6790 if (controller->pause) {
6791 ctx->pause = controller;
6792 ctx_trigger_freeze(ctx);
6793 a = ofpact_next(a);
6794 } else {
6795 xlate_controller_action(ctx, controller->max_len,
6796 controller->reason,
6797 controller->controller_id,
6798 controller->provider_meter_id,
6799 controller->userdata,
6800 controller->userdata_len);
6801 }
6802 break;
6803
6804 case OFPACT_ENQUEUE:
6805 memset(&wc->masks.skb_priority, 0xff,
6806 sizeof wc->masks.skb_priority);
6807 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a), last,
6808 group_bucket_action);
6809 break;
6810
6811 case OFPACT_SET_VLAN_VID:
6812 wc->masks.vlans[0].tci |= htons(VLAN_VID_MASK | VLAN_CFI);
6813 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6814 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
6815 if (!flow->vlans[0].tpid) {
6816 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6817 }
6818 flow->vlans[0].tci &= ~htons(VLAN_VID_MASK);
6819 flow->vlans[0].tci |=
6820 (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) |
6821 htons(VLAN_CFI));
6822 }
6823 break;
6824
6825 case OFPACT_SET_VLAN_PCP:
6826 wc->masks.vlans[0].tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
6827 if (flow->vlans[0].tci & htons(VLAN_CFI) ||
6828 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
6829 if (!flow->vlans[0].tpid) {
6830 flow->vlans[0].tpid = htons(ETH_TYPE_VLAN);
6831 }
6832 flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK);
6833 flow->vlans[0].tci |=
6834 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6835 << VLAN_PCP_SHIFT) | VLAN_CFI);
6836 }
6837 break;
6838
6839 case OFPACT_STRIP_VLAN:
6840 flow_pop_vlan(flow, wc);
6841 break;
6842
6843 case OFPACT_PUSH_VLAN:
6844 flow_push_vlan_uninit(flow, wc);
6845 flow->vlans[0].tpid = ofpact_get_PUSH_VLAN(a)->ethertype;
6846 flow->vlans[0].tci = htons(VLAN_CFI);
6847 break;
6848
6849 case OFPACT_SET_ETH_SRC:
6850 WC_MASK_FIELD(wc, dl_src);
6851 flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
6852 break;
6853
6854 case OFPACT_SET_ETH_DST:
6855 WC_MASK_FIELD(wc, dl_dst);
6856 flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
6857 break;
6858
6859 case OFPACT_SET_IPV4_SRC:
6860 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6861 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
6862 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6863 }
6864 break;
6865
6866 case OFPACT_SET_IPV4_DST:
6867 if (flow->dl_type == htons(ETH_TYPE_IP)) {
6868 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
6869 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6870 }
6871 break;
6872
6873 case OFPACT_SET_IP_DSCP:
6874 if (is_ip_any(flow)) {
6875 wc->masks.nw_tos |= IP_DSCP_MASK;
6876 flow->nw_tos &= ~IP_DSCP_MASK;
6877 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
6878 }
6879 break;
6880
6881 case OFPACT_SET_IP_ECN:
6882 if (is_ip_any(flow)) {
6883 wc->masks.nw_tos |= IP_ECN_MASK;
6884 flow->nw_tos &= ~IP_ECN_MASK;
6885 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
6886 }
6887 break;
6888
6889 case OFPACT_SET_IP_TTL:
6890 if (is_ip_any(flow)) {
6891 wc->masks.nw_ttl = 0xff;
6892 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
6893 }
6894 break;
6895
6896 case OFPACT_SET_L4_SRC_PORT:
6897 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6898 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6899 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
6900 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6901 }
6902 break;
6903
6904 case OFPACT_SET_L4_DST_PORT:
6905 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
6906 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
6907 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
6908 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6909 }
6910 break;
6911
6912 case OFPACT_RESUBMIT:
6913 /* Freezing complicates resubmit. Some action in the flow
6914 * entry found by resubmit might trigger freezing. If that
6915 * happens, then we do not want to execute the resubmit again
6916 * during thawing, so we want to skip back to the head of the loop
6917 * to avoid that, only adding any actions that follow the resubmit
6918 * to the frozen actions.
6919 */
6920 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a), last);
6921 continue;
6922
6923 case OFPACT_SET_TUNNEL:
6924 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
6925 break;
6926
6927 case OFPACT_SET_QUEUE:
6928 memset(&wc->masks.skb_priority, 0xff,
6929 sizeof wc->masks.skb_priority);
6930 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
6931 break;
6932
6933 case OFPACT_POP_QUEUE:
6934 memset(&wc->masks.skb_priority, 0xff,
6935 sizeof wc->masks.skb_priority);
6936 if (flow->skb_priority != ctx->orig_skb_priority) {
6937 flow->skb_priority = ctx->orig_skb_priority;
6938 xlate_report(ctx, OFT_DETAIL, "queue = %#"PRIx32,
6939 flow->skb_priority);
6940 }
6941 break;
6942
6943 case OFPACT_REG_MOVE:
6944 xlate_ofpact_reg_move(ctx, ofpact_get_REG_MOVE(a));
6945 break;
6946
6947 case OFPACT_SET_FIELD:
6948 set_field = ofpact_get_SET_FIELD(a);
6949 mf = set_field->field;
6950
6951 /* Set the field only if the packet actually has it. */
6952 if (mf_are_prereqs_ok(mf, flow, wc)) {
6953 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc);
6954 mf_set_flow_value_masked(mf, set_field->value,
6955 ofpact_set_field_mask(set_field),
6956 flow);
6957 } else {
6958 xlate_report(ctx, OFT_WARN,
6959 "unmet prerequisites for %s, set_field ignored",
6960 mf->name);
6961
6962 }
6963 break;
6964
6965 case OFPACT_STACK_PUSH:
6966 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
6967 &ctx->stack);
6968 break;
6969
6970 case OFPACT_STACK_POP:
6971 xlate_ofpact_stack_pop(ctx, ofpact_get_STACK_POP(a));
6972 break;
6973
6974 case OFPACT_PUSH_MPLS:
6975 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
6976 break;
6977
6978 case OFPACT_POP_MPLS:
6979 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6980 break;
6981
6982 case OFPACT_SET_MPLS_LABEL:
6983 compose_set_mpls_label_action(
6984 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
6985 break;
6986
6987 case OFPACT_SET_MPLS_TC:
6988 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
6989 break;
6990
6991 case OFPACT_SET_MPLS_TTL:
6992 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
6993 break;
6994
6995 case OFPACT_DEC_MPLS_TTL:
6996 if (compose_dec_mpls_ttl_action(ctx)) {
6997 return;
6998 }
6999 break;
7000
7001 case OFPACT_DEC_NSH_TTL:
7002 if (compose_dec_nsh_ttl_action(ctx)) {
7003 return;
7004 }
7005 break;
7006
7007 case OFPACT_DEC_TTL:
7008 wc->masks.nw_ttl = 0xff;
7009 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
7010 return;
7011 }
7012 break;
7013
7014 case OFPACT_NOTE:
7015 /* Nothing to do. */
7016 break;
7017
7018 case OFPACT_MULTIPATH:
7019 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
7020 xlate_report_subfield(ctx, &ofpact_get_MULTIPATH(a)->dst);
7021 break;
7022
7023 case OFPACT_BUNDLE:
7024 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a), last,
7025 group_bucket_action);
7026 break;
7027
7028 case OFPACT_OUTPUT_REG:
7029 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a), last,
7030 group_bucket_action);
7031 break;
7032
7033 case OFPACT_OUTPUT_TRUNC:
7034 xlate_output_trunc_action(ctx, ofpact_get_OUTPUT_TRUNC(a)->port,
7035 ofpact_get_OUTPUT_TRUNC(a)->max_len, last,
7036 group_bucket_action);
7037 break;
7038
7039 case OFPACT_LEARN:
7040 xlate_learn_action(ctx, ofpact_get_LEARN(a));
7041 break;
7042
7043 case OFPACT_CONJUNCTION:
7044 /* A flow with a "conjunction" action represents part of a special
7045 * kind of "set membership match". Such a flow should not actually
7046 * get executed, but it could via, say, a "packet-out", even though
7047 * that wouldn't be useful. Log it to help debugging. */
7048 xlate_report_error(ctx, "executing no-op conjunction action");
7049 break;
7050
7051 case OFPACT_EXIT:
7052 ctx->exit = true;
7053 break;
7054
7055 case OFPACT_UNROLL_XLATE:
7056 xlate_ofpact_unroll_xlate(ctx, ofpact_get_UNROLL_XLATE(a));
7057 break;
7058
7059 case OFPACT_FIN_TIMEOUT:
7060 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
7061 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
7062 break;
7063
7064 case OFPACT_DELETE_FIELD:
7065 xlate_delete_field(ctx, flow, ofpact_get_DELETE_FIELD(a));
7066 break;
7067
7068 case OFPACT_CLEAR_ACTIONS:
7069 xlate_report_action_set(ctx, "was");
7070 ofpbuf_clear(&ctx->action_set);
7071 ctx->xin->flow.actset_output = OFPP_UNSET;
7072 ctx->action_set_has_group = false;
7073 break;
7074
7075 case OFPACT_WRITE_ACTIONS:
7076 xlate_write_actions(ctx, ofpact_get_WRITE_ACTIONS(a));
7077 xlate_report_action_set(ctx, "is");
7078 break;
7079
7080 case OFPACT_WRITE_METADATA:
7081 metadata = ofpact_get_WRITE_METADATA(a);
7082 flow->metadata &= ~metadata->mask;
7083 flow->metadata |= metadata->metadata & metadata->mask;
7084 break;
7085
7086 case OFPACT_METER:
7087 xlate_meter_action(ctx, ofpact_get_METER(a));
7088 break;
7089
7090 case OFPACT_GOTO_TABLE: {
7091 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
7092
7093 ovs_assert(ctx->table_id < ogt->table_id);
7094
7095 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
7096 ogt->table_id, true, true, false, last,
7097 do_xlate_actions);
7098 break;
7099 }
7100
7101 case OFPACT_SAMPLE:
7102 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
7103 break;
7104
7105 case OFPACT_CLONE:
7106 compose_clone(ctx, ofpact_get_CLONE(a), last);
7107 break;
7108
7109 case OFPACT_ENCAP:
7110 xlate_generic_encap_action(ctx, ofpact_get_ENCAP(a));
7111 break;
7112
7113 case OFPACT_DECAP: {
7114 bool recirc_needed =
7115 xlate_generic_decap_action(ctx, ofpact_get_DECAP(a));
7116 if (!ctx->error && recirc_needed) {
7117 /* Recirculate for parsing of inner packet. */
7118 ctx_trigger_freeze(ctx);
7119 /* Then continue with next action. */
7120 a = ofpact_next(a);
7121 }
7122 break;
7123 }
7124
7125 case OFPACT_CT:
7126 compose_conntrack_action(ctx, ofpact_get_CT(a), last);
7127 break;
7128
7129 case OFPACT_CT_CLEAR:
7130 compose_ct_clear_action(ctx);
7131 break;
7132
7133 case OFPACT_NAT:
7134 /* This will be processed by compose_conntrack_action(). */
7135 ctx->ct_nat_action = ofpact_get_NAT(a);
7136 break;
7137
7138 case OFPACT_DEBUG_RECIRC:
7139 ctx_trigger_freeze(ctx);
7140 a = ofpact_next(a);
7141 break;
7142
7143 case OFPACT_DEBUG_SLOW:
7144 ctx->xout->slow |= SLOW_ACTION;
7145 break;
7146
7147 case OFPACT_CHECK_PKT_LARGER: {
7148 if (last) {
7149 /* If this is the last action, then there is no need to
7150 * translate the action. */
7151 break;
7152 }
7153 const struct ofpact *remaining_acts = ofpact_next(a);
7154 size_t remaining_acts_len = ofpact_remaining_len(remaining_acts,
7155 ofpacts,
7156 ofpacts_len);
7157 xlate_check_pkt_larger(ctx, ofpact_get_CHECK_PKT_LARGER(a),
7158 remaining_acts, remaining_acts_len);
7159 break;
7160 }
7161 }
7162
7163 /* Check if we need to store this and the remaining actions for later
7164 * execution. */
7165 if (!ctx->error && ctx->exit && ctx_first_frozen_action(ctx)) {
7166 freeze_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
7167 break;
7168 }
7169 }
7170 }
7171
7172 void
7173 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
7174 ovs_version_t version, const struct flow *flow,
7175 ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
7176 const struct dp_packet *packet, struct flow_wildcards *wc,
7177 struct ofpbuf *odp_actions)
7178 {
7179 xin->ofproto = ofproto;
7180 xin->tables_version = version;
7181 xin->flow = *flow;
7182 xin->upcall_flow = flow;
7183 xin->flow.in_port.ofp_port = in_port;
7184 xin->flow.actset_output = OFPP_UNSET;
7185 xin->packet = packet;
7186 xin->allow_side_effects = packet != NULL;
7187 xin->rule = rule;
7188 xin->xcache = NULL;
7189 xin->ofpacts = NULL;
7190 xin->ofpacts_len = 0;
7191 xin->tcp_flags = tcp_flags;
7192 xin->trace = NULL;
7193 xin->resubmit_stats = NULL;
7194 xin->depth = 0;
7195 xin->resubmits = 0;
7196 xin->wc = wc;
7197 xin->odp_actions = odp_actions;
7198 xin->in_packet_out = false;
7199 xin->recirc_queue = NULL;
7200 xin->xport_uuid = UUID_ZERO;
7201
7202 /* Do recirc lookup. */
7203 xin->frozen_state = NULL;
7204 if (flow->recirc_id) {
7205 const struct recirc_id_node *node
7206 = recirc_id_node_find(flow->recirc_id);
7207 if (node) {
7208 xin->frozen_state = &node->state;
7209 }
7210 }
7211 }
7212
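/* Releases the recirculation ID references held by 'xout'. No-op if
* 'xout' is NULL. */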
7213 void
7214 xlate_out_uninit(struct xlate_out *xout)
7215 {
7216 if (xout) {
7217 recirc_refs_unref(&xout->recircs);
7218 }
7219 }
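/* Typical caller sequence (an illustrative sketch only; real callers also
* collect statistics, populate the xlate cache, and handle errors):
*
*     struct xlate_in xin;
*     struct xlate_out xout;
*     uint64_t stub[1024 / 8];
*     struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
*
*     xlate_in_init(&xin, ofproto, version, &flow, in_port, rule,
*                   tcp_flags, packet, wc, &odp_actions);
*     if (xlate_actions(&xin, &xout) == XLATE_OK) {
*         ...install or execute 'odp_actions'...
*     }
*     xlate_out_uninit(&xout);
*     ofpbuf_uninit(&odp_actions);
*/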
7220 \f
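/* Returns the QoS mapping entry for 'skb_priority' in 'xport', or NULL if
* 'xport' has no DSCP configured for that priority. */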
7221 static struct skb_priority_to_dscp *
7222 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
7223 {
7224 struct skb_priority_to_dscp *pdscp;
7225 uint32_t hash;
7226
7227 hash = hash_int(skb_priority, 0);
7228 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
7229 if (pdscp->skb_priority == skb_priority) {
7230 return pdscp;
7231 }
7232 }
7233 return NULL;
7234 }
7235
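/* Looks up the DSCP value that 'xport' maps 'skb_priority' to. On success
* stores it in '*dscp' and returns true; otherwise stores 0 and returns
* false. */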
7236 static bool
7237 dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
7238 uint8_t *dscp)
7239 {
7240 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
7241 *dscp = pdscp ? pdscp->dscp : 0;
7242 return pdscp != NULL;
7243 }
7244
7245 static size_t
7246 count_skb_priorities(const struct xport *xport)
7247 {
7248 return hmap_count(&xport->skb_priorities);
7249 }
7250
7251 static void
7252 clear_skb_priorities(struct xport *xport)
7253 {
7254 struct skb_priority_to_dscp *pdscp;
7255
7256 HMAP_FOR_EACH_POP (pdscp, hmap_node, &xport->skb_priorities) {
7257 free(pdscp);
7258 }
7259 }
7260
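/* Returns true if the datapath actions composed so far include an output to
* the bridge's local port (OFPP_LOCAL). Used to decide whether in-band
* control traffic still needs an explicit output to the local port. */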
7261 static bool
7262 actions_output_to_local_port(const struct xlate_ctx *ctx)
7263 {
7264 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
7265 const struct nlattr *a;
7266 unsigned int left;
7267
7268 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
7269 ctx->odp_actions->size) {
7270 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
7271 && nl_attr_get_odp_port(a) == local_odp_port) {
7272 return true;
7273 }
7274 }
7275 return false;
7276 }
7277
7278 #if defined(__linux__)
7279 /* Returns the maximum number of packets that the Linux kernel is willing to
7280 * queue up internally to certain kinds of software-implemented ports, or the
7281 * default (and rarely modified) value if it cannot be determined. */
7282 static int
7283 netdev_max_backlog(void)
7284 {
7285 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
7286 static int max_backlog = 1000; /* The normal default value. */
7287
7288 if (ovsthread_once_start(&once)) {
7289 static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
7290 FILE *stream;
7291 int n;
7292
7293 stream = fopen(filename, "r");
7294 if (!stream) {
7295 VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
7296 } else {
7297 if (fscanf(stream, "%d", &n) != 1) {
7298 VLOG_WARN("%s: read error", filename);
7299 } else if (n <= 100) {
7300 VLOG_WARN("%s: unexpectedly small value %d", filename, n);
7301 } else {
7302 max_backlog = n;
7303 }
7304 fclose(stream);
7305 }
7306 ovsthread_once_done(&once);
7307
7308 VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
7309 }
7310
7311 return max_backlog;
7312 }
7313
7314 /* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
7315 * 'odp_actions'. */
7316 static int
7317 count_output_actions(const struct ofpbuf *odp_actions)
7318 {
7319 const struct nlattr *a;
7320 size_t left;
7321 int n = 0;
7322
7323 NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
7324 if ((a->nla_type == OVS_ACTION_ATTR_OUTPUT) ||
7325 (a->nla_type == OVS_ACTION_ATTR_LB_OUTPUT)) {
7326 n++;
7327 }
7328 }
7329 return n;
7330 }
7331 #endif /* defined(__linux__) */
7332
7333 /* Returns true if 'odp_actions' contains more output actions than the datapath
7334 * can reliably handle in one go. On Linux, this is the value of the
7335 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
7336 * packets that the kernel is willing to queue up for processing while the
7337 * datapath is processing a set of actions. */
7338 static bool
7339 too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
7340 {
7341 #ifdef __linux__
7342 return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
7343 && count_output_actions(odp_actions) > netdev_max_backlog());
7344 #else
7345 /* OSes other than Linux might have similar limits, but we don't know how
7346 * to determine them. */
7347 return false;
7348 #endif
7349 }
7350
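/* Initializes the translation's flow wildcards: starts from a catch-all
* mask and then exact-matches the fields that every translation examines. */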
7351 static void
7352 xlate_wc_init(struct xlate_ctx *ctx)
7353 {
7354 flow_wildcards_init_catchall(ctx->wc);
7355
7356 /* Some fields we consider to always be examined. */
7357 WC_MASK_FIELD(ctx->wc, packet_type);
7358 WC_MASK_FIELD(ctx->wc, in_port);
7359 WC_MASK_FIELD(ctx->wc, dl_type);
7360 if (is_ip_any(&ctx->xin->flow)) {
7361 WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
7362 }
7363
7364 if (ctx->xbridge->support.odp.recirc) {
7365 /* Always exactly match recirc_id when datapath supports
7366 * recirculation. */
7367 WC_MASK_FIELD(ctx->wc, recirc_id);
7368 }
7369
7370 if (ctx->xbridge->netflow) {
7371 netflow_mask_wc(&ctx->xin->flow, ctx->wc);
7372 }
7373
7374 tnl_wc_init(&ctx->xin->flow, ctx->wc);
7375 }
7376
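/* Finalizes the wildcard masks after translation, clearing or adjusting
* bits that would otherwise produce datapath flows that are too specific or
* inconsistent with what the datapath can represent. */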
7377 static void
7378 xlate_wc_finish(struct xlate_ctx *ctx)
7379 {
7380 int i;
7381
7382 /* Clear the metadata and register wildcard masks, because we won't
7383 * use non-header fields as part of the cache. */
7384 flow_wildcards_clear_non_packet_fields(ctx->wc);
7385
7386 /* Wildcard Ethernet address fields if the original packet type was not
7387 * Ethernet.
7388 *
7389 * (The Ethertype field is used even when the original packet type is not
7390 * Ethernet.) */
7391 if (ctx->xin->upcall_flow->packet_type != htonl(PT_ETH)) {
7392 ctx->wc->masks.dl_dst = eth_addr_zero;
7393 ctx->wc->masks.dl_src = eth_addr_zero;
7394 }
7395
7396 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
7397 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
7398 * represent these fields. The datapath interface, on the other hand,
7399 * represents them with just 8 bits each. This means that if the high
7400 * 8 bits of the masks for these fields somehow become set, then they
7401 * will get chopped off by a round trip through the datapath, and
7402 * revalidation will spot that as an inconsistency and delete the flow.
7403 * Avoid the problem here by making sure that only the low 8 bits of
7404 * either field can be unwildcarded for ICMP.
7405 */
7406 if (is_icmpv4(&ctx->xin->flow, NULL) || is_icmpv6(&ctx->xin->flow, NULL)) {
7407 ctx->wc->masks.tp_src &= htons(UINT8_MAX);
7408 ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
7409 }
7410 /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
7411 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
7412 if (ctx->wc->masks.vlans[i].tci) {
7413 ctx->wc->masks.vlans[i].tci |= htons(VLAN_CFI);
7414 }
7415 }
7416
7417 /* The classifier might return masks that match on tp_src and tp_dst even
7418 * for later fragments. This happens because there might be flows that
7419 * match on tp_src or tp_dst without matching on the frag bits, because
7420 * it is not a prerequisite for OpenFlow. Since it is a prerequisite for
7421 * datapath flows and since tp_src and tp_dst are always going to be 0,
7422 * wildcard the fields here. */
7423 if (ctx->xin->flow.nw_frag & FLOW_NW_FRAG_LATER) {
7424 ctx->wc->masks.tp_src = 0;
7425 ctx->wc->masks.tp_dst = 0;
7426 }
7427
7428 /* Clear flow wildcard bits for fields which are not present
7429 * in the original packet header. These wildcards may get set
7430 * due to push/set_field actions. This results in frequent
7431 * invalidation of datapath flows by the revalidator thread. */
7432
7433 /* Clear mpls label wc bits if original packet is non-mpls. */
7434 if (!eth_type_mpls(ctx->xin->upcall_flow->dl_type)) {
7435 for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
7436 ctx->wc->masks.mpls_lse[i] = 0;
7437 }
7438 }
7439 /* Clear vlan header wc bits if original packet does not have
7440 * vlan header. */
7441 for (i = 0; i < FLOW_MAX_VLAN_HEADERS; i++) {
7442 if (!eth_type_vlan(ctx->xin->upcall_flow->vlans[i].tpid)) {
7443 ctx->wc->masks.vlans[i].tpid = 0;
7444 ctx->wc->masks.vlans[i].tci = 0;
7445 }
7446 }
7447 }
7448
7449 /* Translates the flow, actions, or rule in 'xin' into datapath actions in
7450 * 'xout'.
7451 * The caller must take responsibility for eventually freeing 'xout', with
7452 * xlate_out_uninit().
7453 * Returns 'XLATE_OK' if translation was successful. In case of an error an
7454 * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
7455 * so that most callers may ignore the return value and transparently install a
7456 * drop flow when the translation fails. */
7457 enum xlate_error
7458 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
7459 {
7460 *xout = (struct xlate_out) {
7461 .slow = 0,
7462 .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
7463 };
7464
7465 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7466 struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
7467 if (!xbridge) {
7468 return XLATE_BRIDGE_NOT_FOUND;
7469 }
7470
7471 struct flow *flow = &xin->flow;
7472
7473 uint8_t stack_stub[1024];
7474 uint64_t action_set_stub[1024 / 8];
7475 uint64_t frozen_actions_stub[1024 / 8];
7476 uint64_t actions_stub[256 / 8];
7477 struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
7478 struct xlate_ctx ctx = {
7479 .xin = xin,
7480 .xout = xout,
7481 .base_flow = *flow,
7482 .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
7483 .xcfg = xcfg,
7484 .xbridge = xbridge,
7485 .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
7486 .rule = xin->rule,
7487 .wc = (xin->wc
7488 ? xin->wc
7489 : &(struct flow_wildcards) { .masks = { .dl_type = 0 } }),
7490 .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
7491
7492 .depth = xin->depth,
7493 .resubmits = xin->resubmits,
7494 .in_action_set = false,
7495 .in_packet_out = xin->in_packet_out,
7496 .pending_encap = false,
7497 .pending_decap = false,
7498 .encap_data = NULL,
7499
7500 .table_id = 0,
7501 .rule_cookie = OVS_BE64_MAX,
7502 .orig_skb_priority = flow->skb_priority,
7503 .sflow_n_outputs = 0,
7504 .sflow_odp_port = 0,
7505 .nf_output_iface = NF_OUT_DROP,
7506 .exit = false,
7507 .error = XLATE_OK,
7508 .mirrors = 0,
7509
7510 .freezing = false,
7511 .recirc_update_dp_hash = false,
7512 .frozen_actions = OFPBUF_STUB_INITIALIZER(frozen_actions_stub),
7513 .pause = NULL,
7514
7515 .was_mpls = false,
7516 .conntracked = false,
7517
7518 .ct_nat_action = NULL,
7519
7520 .action_set_has_group = false,
7521 .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
7522 };
7523
7524 /* 'base_flow' reflects the packet as it came in, but we need it to reflect
7525 * the packet as the datapath will treat it for output actions. Our
7526 * datapath doesn't retain tunneling information without us re-setting
7527 * it, so clear the tunnel data.
7528 */
7529
7530 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
7531
7532 ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
7533 xlate_wc_init(&ctx);
7534
7535 COVERAGE_INC(xlate_actions);
7536
7537 xin->trace = xlate_report(&ctx, OFT_BRIDGE, "bridge(\"%s\")",
7538 xbridge->name);
7539 if (xin->frozen_state) {
7540 const struct frozen_state *state = xin->frozen_state;
7541
7542 struct ovs_list *old_trace = xin->trace;
7543 xin->trace = xlate_report(&ctx, OFT_THAW, "thaw");
7544
7545 if (xin->ofpacts_len > 0 || ctx.rule) {
7546 xlate_report_error(&ctx, "Recirculation conflict (%s)!",
7547 xin->ofpacts_len ? "actions" : "rule");
7548 ctx.error = XLATE_RECIRCULATION_CONFLICT;
7549 goto exit;
7550 }
7551
7552 /* Set the bridge for post-recirculation processing if needed. */
7553 if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
7554 const struct xbridge *new_bridge
7555 = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
7556
7557 if (OVS_UNLIKELY(!new_bridge)) {
7558 /* Drop the packet if the bridge cannot be found. */
7559 xlate_report_error(&ctx, "Frozen bridge no longer exists.");
7560 ctx.error = XLATE_BRIDGE_NOT_FOUND;
7561 xin->trace = old_trace;
7562 goto exit;
7563 }
7564 ctx.xbridge = new_bridge;
7565 /* The bridge is now known so obtain its table version. */
7566 ctx.xin->tables_version
7567 = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
7568 }
7569
7570 /* Set the thawed table id. Note: A table lookup is done only if there
7571 * are no frozen actions. */
7572 ctx.table_id = state->table_id;
7573 xlate_report(&ctx, OFT_THAW,
7574 "Resuming from table %"PRIu8, ctx.table_id);
7575
7576 ctx.conntracked = state->conntracked;
7577 if (!state->conntracked) {
7578 clear_conntrack(&ctx);
7579 }
7580
7581 /* Restore pipeline metadata. May change flow's in_port and other
7582 * metadata to the values that existed when freezing was triggered. */
7583 frozen_metadata_to_flow(&ctx.xbridge->ofproto->up,
7584 &state->metadata, flow);
7585
7586 /* Restore stack, if any. */
7587 if (state->stack) {
7588 ofpbuf_put(&ctx.stack, state->stack, state->stack_size);
7589 }
7590
7591 /* Restore mirror state. */
7592 ctx.mirrors = state->mirrors;
7593
7594 /* Restore action set, if any. */
7595 if (state->action_set_len) {
7596 xlate_report_actions(&ctx, OFT_THAW, "Restoring action set",
7597 state->action_set, state->action_set_len);
7598
7599 flow->actset_output = OFPP_UNSET;
7600 xlate_write_actions__(&ctx, state->action_set,
7601 state->action_set_len);
7602 }
7603
7604 /* Restore frozen actions. If there are no actions, processing will
7605 * start with a lookup in the table set above. */
7606 xin->ofpacts = state->ofpacts;
7607 xin->ofpacts_len = state->ofpacts_len;
7608 if (state->ofpacts_len) {
7609 xlate_report_actions(&ctx, OFT_THAW, "Restoring actions",
7610 xin->ofpacts, xin->ofpacts_len);
7611 }
7612
7613 xin->trace = old_trace;
7614 } else if (OVS_UNLIKELY(flow->recirc_id)) {
7615 xlate_report_error(&ctx,
7616 "Recirculation context not found for ID %"PRIx32,
7617 flow->recirc_id);
7618 ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
7619 goto exit;
7620 }
7621
7622 /* Tunnel metadata in udpif format must be normalized before translation. */
7623 if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7624 const struct tun_table *tun_tab = ofproto_get_tun_tab(
7625 &ctx.xbridge->ofproto->up);
7626 int err;
7627
7628 err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
7629 &xin->upcall_flow->tunnel,
7630 &flow->tunnel);
7631 if (err) {
7632 xlate_report_error(&ctx, "Invalid Geneve tunnel metadata");
7633 ctx.error = XLATE_INVALID_TUNNEL_METADATA;
7634 goto exit;
7635 }
7636 } else if (!flow->tunnel.metadata.tab) {
7637 /* If the original flow did not come in on a tunnel, then it won't have
7638 * FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
7639 * table in case we generate tunnel actions. */
7640 flow->tunnel.metadata.tab = ofproto_get_tun_tab(
7641 &ctx.xbridge->ofproto->up);
7642 }
7643 ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
7644
7645 /* Get the proximate input port of the packet. (If xin->frozen_state,
7646 * flow->in_port is the ultimate input port of the packet.) */
7647 struct xport *in_port = get_ofp_port(xbridge,
7648 ctx.base_flow.in_port.ofp_port);
7649 if (in_port && !in_port->peer) {
7650 ctx.xin->xport_uuid = in_port->uuid;
7651 }
7652
7653 if (flow->packet_type != htonl(PT_ETH) && in_port &&
7654 in_port->pt_mode == NETDEV_PT_LEGACY_L3 && ctx.table_id == 0) {
7655 /* Add a dummy Ethernet header to a non-L2 packet if it's coming from
7656 * an L3 port, so that all packets are L2 packets for the lookup.
7657 * The dl_type has already been set from the packet_type. */
7658 flow->packet_type = htonl(PT_ETH);
7659 flow->dl_src = eth_addr_zero;
7660 flow->dl_dst = eth_addr_zero;
7661 ctx.pending_encap = true;
7662 }
7663
7664 if (!xin->ofpacts && !ctx.rule) {
7665 ctx.rule = rule_dpif_lookup_from_table(
7666 ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
7667 ctx.xin->resubmit_stats, &ctx.table_id,
7668 flow->in_port.ofp_port, true, true, ctx.xin->xcache);
7669 if (ctx.xin->resubmit_stats) {
7670 rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats, false);
7671 }
7672 if (ctx.xin->xcache) {
7673 struct xc_entry *entry;
7674
7675 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
7676 entry->rule = ctx.rule;
7677 ofproto_rule_ref(&ctx.rule->up);
7678 }
7679
7680 xlate_report_table(&ctx, ctx.rule, ctx.table_id);
7681 }
7682
7683 /* Tunnel stats are updated only for packets that are not thawed. */
7684 if (!xin->frozen_state && in_port && in_port->is_tunnel) {
7685 if (ctx.xin->resubmit_stats) {
7686 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
7687 if (in_port->bfd) {
7688 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
7689 }
7690 }
7691 if (ctx.xin->xcache) {
7692 struct xc_entry *entry;
7693
7694 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
7695 entry->dev.rx = netdev_ref(in_port->netdev);
7696 entry->dev.bfd = bfd_ref(in_port->bfd);
7697 }
7698 }
7699
7700 if (!xin->frozen_state && process_special(&ctx, in_port)) {
7701 /* process_special() did all the processing for this packet.
7702 *
7703 * We do not perform special processing on thawed packets, since that
7704 * was done before they were frozen and should not be redone. */
7705 mirror_ingress_packet(&ctx);
7706 } else if (in_port && in_port->xbundle
7707 && xbundle_mirror_out(xbridge, in_port->xbundle)) {
7708 xlate_report_error(&ctx, "dropping packet received on port "
7709 "%s, which is reserved exclusively for mirroring",
7710 in_port->xbundle->name);
7711 } else {
7712 /* Sampling is done on initial reception; don't redo after thawing. */
7713 unsigned int user_cookie_offset = 0;
7714 if (!xin->frozen_state) {
7715 user_cookie_offset = compose_sflow_action(&ctx);
7716 compose_ipfix_action(&ctx, ODPP_NONE);
7717 }
7718 size_t sample_actions_len = ctx.odp_actions->size;
7719 bool ecn_drop = !tnl_process_ecn(flow);
7720
7721 if (!ecn_drop
7722 && (!in_port || may_receive(in_port, &ctx))) {
7723 const struct ofpact *ofpacts;
7724 size_t ofpacts_len;
7725
7726 if (xin->ofpacts) {
7727 ofpacts = xin->ofpacts;
7728 ofpacts_len = xin->ofpacts_len;
7729 } else if (ctx.rule) {
7730 const struct rule_actions *actions
7731 = rule_get_actions(&ctx.rule->up);
7732 ofpacts = actions->ofpacts;
7733 ofpacts_len = actions->ofpacts_len;
7734 ctx.rule_cookie = ctx.rule->up.flow_cookie;
7735 } else {
7736 OVS_NOT_REACHED();
7737 }
7738
7739 mirror_ingress_packet(&ctx);
7740 do_xlate_actions(ofpacts, ofpacts_len, &ctx, true, false);
7741 if (ctx.error) {
7742 goto exit;
7743 }
7744
7745 /* We've let OFPP_NORMAL and the learning action look at the
7746 * packet, so cancel all actions and freezing if forwarding is
7747 * disabled. */
7748 if (in_port && (!xport_stp_forward_state(in_port) ||
7749 !xport_rstp_forward_state(in_port))) {
7750 ctx.odp_actions->size = sample_actions_len;
7751 ctx_cancel_freeze(&ctx);
7752 ofpbuf_clear(&ctx.action_set);
7753 ctx.error = XLATE_FORWARDING_DISABLED;
7754 }
7755
7756 if (!ctx.freezing) {
7757 xlate_action_set(&ctx);
7758 }
7759 if (ctx.freezing) {
7760 finish_freezing(&ctx);
7761 }
7762 } else if (ecn_drop) {
7763 ctx.error = XLATE_CONGESTION_DROP;
7764 }
7765
7766 /* Output only fully processed packets. */
7767 if (!ctx.freezing
7768 && xbridge->has_in_band
7769 && in_band_must_output_to_local_port(flow)
7770 && !actions_output_to_local_port(&ctx)) {
7771 WC_MASK_FIELD(ctx.wc, nw_proto);
7772 WC_MASK_FIELD(ctx.wc, tp_src);
7773 WC_MASK_FIELD(ctx.wc, tp_dst);
7774 WC_MASK_FIELD(ctx.wc, dl_type);
7775 xlate_report(&ctx, OFT_DETAIL, "outputting DHCP packet "
7776 "to local port for in-band control");
7777 compose_output_action(&ctx, OFPP_LOCAL, NULL, false, false);
7778 }
7779
7780 if (user_cookie_offset) {
7781 fix_sflow_action(&ctx, user_cookie_offset);
7782 }
7783 }
7784
7785 if (nl_attr_oversized(ctx.odp_actions->size)) {
7786 /* These datapath actions are too big for a Netlink attribute, so we
7787 * can't hand them to the kernel directly. dpif_execute() can execute
7788 * them one by one with help, so just mark the result as SLOW_ACTION to
7789 * prevent the flow from being installed. */
7790 COVERAGE_INC(xlate_actions_oversize);
7791 ctx.xout->slow |= SLOW_ACTION;
7792 } else if (too_many_output_actions(ctx.odp_actions)) {
7793 COVERAGE_INC(xlate_actions_too_many_output);
7794 ctx.xout->slow |= SLOW_ACTION;
7795 }
7796
7797 /* Update NetFlow for non-frozen traffic. */
7798 if (xbridge->netflow && !xin->frozen_state) {
7799 if (ctx.xin->resubmit_stats) {
7800 netflow_flow_update(xbridge->netflow, flow,
7801 ctx.nf_output_iface,
7802 ctx.xin->resubmit_stats);
7803 }
7804 if (ctx.xin->xcache) {
7805 struct xc_entry *entry;
7806
7807 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
7808 entry->nf.netflow = netflow_ref(xbridge->netflow);
7809 entry->nf.flow = xmemdup(flow, sizeof *flow);
7810 entry->nf.iface = ctx.nf_output_iface;
7811 }
7812 }
7813
7814 /* Translate tunnel metadata masks to udpif format if necessary. */
7815 if (xin->upcall_flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
7816 if (ctx.wc->masks.tunnel.metadata.present.map) {
7817 const struct flow_tnl *upcall_tnl = &xin->upcall_flow->tunnel;
7818 struct geneve_opt opts[TLV_TOT_OPT_SIZE /
7819 sizeof(struct geneve_opt)];
7820
7821 tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
7822 &ctx.wc->masks.tunnel,
7823 upcall_tnl->metadata.opts.gnv,
7824 upcall_tnl->metadata.present.len,
7825 opts);
7826 memset(&ctx.wc->masks.tunnel.metadata, 0,
7827 sizeof ctx.wc->masks.tunnel.metadata);
7828 memcpy(&ctx.wc->masks.tunnel.metadata.opts.gnv, opts,
7829 upcall_tnl->metadata.present.len);
7830 }
7831 ctx.wc->masks.tunnel.metadata.present.len = 0xff;
7832 ctx.wc->masks.tunnel.metadata.tab = NULL;
7833 ctx.wc->masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
7834 } else if (!xin->upcall_flow->tunnel.metadata.tab) {
7835 /* If we didn't have options in UDPIF format and didn't have an existing
7836 * metadata table, then it means that there were no options at all when
7837 * we started processing and any wildcards we picked up were from
7838 * action generation. Without options on the incoming packet, wildcards
7839 * aren't meaningful. To avoid them possibly getting misinterpreted,
7840 * just clear everything. */
7841 if (ctx.wc->masks.tunnel.metadata.present.map) {
7842 memset(&ctx.wc->masks.tunnel.metadata, 0,
7843 sizeof ctx.wc->masks.tunnel.metadata);
7844 } else {
7845 ctx.wc->masks.tunnel.metadata.tab = NULL;
7846 }
7847 }
7848
7849 xlate_wc_finish(&ctx);
7850
7851 exit:
7852 /* Reset the table to what it was when we came in. If we only fetched
7853 * it locally, then it has no meaning outside of flow translation. */
7854 flow->tunnel.metadata.tab = xin->upcall_flow->tunnel.metadata.tab;
7855
7856 ofpbuf_uninit(&ctx.stack);
7857 ofpbuf_uninit(&ctx.action_set);
7858 ofpbuf_uninit(&ctx.frozen_actions);
7859 ofpbuf_uninit(&scratch_actions);
7860 ofpbuf_delete(ctx.encap_data);
7861
7862 /* Make sure we return a "drop flow" in case of an error. */
7863 if (ctx.error) {
7864 xout->slow = 0;
7865 if (xin->odp_actions) {
7866 ofpbuf_clear(xin->odp_actions);
7867 }
7868 }
7869
7870 /* Install drop action if datapath supports explicit drop action. */
7871 if (xin->odp_actions && !xin->odp_actions->size &&
7872 ovs_explicit_drop_action_supported(ctx.xbridge->ofproto)) {
7873 put_drop_action(xin->odp_actions, ctx.error);
7874 }
7875
7876 /* Since congestion drop and forwarding drop are not exactly
7877 * translation errors, reset the translation error here.
7878 */
7879 if (ctx.error == XLATE_CONGESTION_DROP ||
7880 ctx.error == XLATE_FORWARDING_DISABLED) {
7881 ctx.error = XLATE_OK;
7882 }
7883
7884 return ctx.error;
7885 }
7886
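/* Resumes translation of a packet that was paused by a "pause" controller
* action: rebuilds the frozen pipeline state from 'pin', translates the
* remaining actions into 'odp_actions', and reports the slow-path reason in
* '*slow'. Returns 0 on success or if the error is not worth reporting to
* the controller; returns OFPERR_NXR_STALE if the frozen state's bridge no
* longer exists. */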
7887 enum ofperr
7888 xlate_resume(struct ofproto_dpif *ofproto,
7889 const struct ofputil_packet_in_private *pin,
7890 struct ofpbuf *odp_actions,
7891 enum slow_path_reason *slow,
7892 struct flow *flow,
7893 struct xlate_cache *xcache)
7894 {
7895 struct dp_packet packet;
7896 dp_packet_use_const(&packet, pin->base.packet,
7897 pin->base.packet_len);
7898
7899 pkt_metadata_from_flow(&packet.md, &pin->base.flow_metadata.flow);
7900 flow_extract(&packet, flow);
7901
7902 struct xlate_in xin;
7903 xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
7904 flow, 0, NULL, ntohs(flow->tcp_flags),
7905 &packet, NULL, odp_actions);
7906 xin.xcache = xcache;
7907
7908 struct ofpact_note noop;
7909 ofpact_init_NOTE(&noop);
7910 noop.length = 0;
7911
7912 bool any_actions = pin->actions_len > 0;
7913 struct frozen_state state = {
7914 .table_id = 0, /* Not the table where NXAST_PAUSE was executed. */
7915 .ofproto_uuid = pin->bridge,
7916 .stack = pin->stack,
7917 .stack_size = pin->stack_size,
7918 .mirrors = pin->mirrors,
7919 .conntracked = pin->conntracked,
7920 .xport_uuid = UUID_ZERO,
7921
7922 /* When there are no actions, xlate_actions() will search the flow
7923 * table. We don't want it to do that (we want it to resume), so
7924 * supply a no-op action if there aren't any.
7925 *
7926 * (We can't necessarily avoid translating actions entirely if there
7927 * aren't any actions, because there might be some finishing-up to do
7928 * at the end of the pipeline, and we don't check for those
7929 * conditions.) */
7930 .ofpacts = any_actions ? pin->actions : &noop.ofpact,
7931 .ofpacts_len = any_actions ? pin->actions_len : sizeof noop,
7932
7933 .action_set = pin->action_set,
7934 .action_set_len = pin->action_set_len,
7935 };
7936 frozen_metadata_from_flow(&state.metadata,
7937 &pin->base.flow_metadata.flow);
7938 xin.frozen_state = &state;
7939
7940 struct xlate_out xout;
7941 enum xlate_error error = xlate_actions(&xin, &xout);
7942 *slow = xout.slow;
7943 xlate_out_uninit(&xout);
7944
7945 /* xlate_actions() can generate a number of errors, but only
7946 * XLATE_BRIDGE_NOT_FOUND really stands out to me as one that we should be
7947 * sure to report over OpenFlow. The others could come up in packet-outs
7948 * or regular flow translation and I don't think that it's going to be too
7949 * useful to report them to the controller. */
7950 return error == XLATE_BRIDGE_NOT_FOUND ? OFPERR_NXR_STALE : 0;
7951 }
7952
7953 /* Sends 'packet' out 'ofport'. If 'ofport' is a tunnel and that tunnel type
7954 * supports a notion of an OAM flag, sets it if 'oam' is true.
7955 * May modify 'packet'.
7956 * Returns 0 if successful, otherwise a positive errno value. */
7957 int
7958 xlate_send_packet(const struct ofport_dpif *ofport, bool oam,
7959 struct dp_packet *packet)
7960 {
7961 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7962 struct xport *xport;
7963 uint64_t ofpacts_stub[1024 / 8];
7964 struct ofpbuf ofpacts;
7965 struct flow flow;
7966
7967 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
7968 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
7969 flow_extract(packet, &flow);
7970 flow.in_port.ofp_port = OFPP_NONE;
7971
7972 xport = xport_lookup(xcfg, ofport);
7973 if (!xport) {
7974 return EINVAL;
7975 }
7976
7977 if (oam) {
7978 const ovs_be16 flag = htons(NX_TUN_FLAG_OAM);
7979 ofpact_put_set_field(&ofpacts, mf_from_id(MFF_TUN_FLAGS),
7980 &flag, &flag);
7981 }
7982
7983 ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
7984
7985 /* Actions here are not referring to anything versionable (flow tables or
7986 * groups) so we don't need to worry about the version here. */
7987 return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
7988 OVS_VERSION_MAX, &flow, NULL,
7989 ofpacts.data, ofpacts.size, packet);
7990 }
7991
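/* Updates MAC learning for 'ofproto': associates 'dl_src' on 'vlan' with the
* bundle containing 'in_port'. 'is_grat_arp' indicates whether the update was
* prompted by a gratuitous ARP. Does nothing if the bridge or the input
* bundle cannot be found. */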
7992 void
7993 xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
7994 ofp_port_t in_port, struct eth_addr dl_src,
7995 int vlan, bool is_grat_arp)
7996 {
7997 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
7998 struct xbridge *xbridge;
7999 struct xbundle *xbundle;
8000
8001 xbridge = xbridge_lookup(xcfg, ofproto);
8002 if (!xbridge) {
8003 return;
8004 }
8005
8006 xbundle = lookup_input_bundle__(xbridge, in_port, NULL);
8007 if (!xbundle) {
8008 return;
8009 }
8010
8011 update_learning_table__(xbridge, xbundle, dl_src, vlan, is_grat_arp);
8012 }
8013
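/* Records the datapath feature support information in the xlate bridge that
* corresponds to 'ofproto', if that bridge exists in the current xlate
* configuration. */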
8014 void
8015 xlate_set_support(const struct ofproto_dpif *ofproto,
8016 const struct dpif_backer_support *support)
8017 {
8018 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
8019 struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
8020
8021 if (xbridge) {
8022 xbridge->support = *support;
8023 }
8024 }