/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "ofproto/ofproto-dpif-xlate.h"

#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "tnl-arp-cache.h"
#include "byte-order.h"
#include "dp-packet.h"
#include "dynamic-string.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "ovs-router.h"
#include "tnl-ports.h"
#include "openvswitch/vlog.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64
#define MAX_INTERNAL_RESUBMITS 1   /* Max resubmits allowed using rules in
                                      internal table. */

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
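
/* For reference: with the values above, a single flow translation is capped
 * at 64 * 64 = 4096 resubmits in total, however deeply they nest. */
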
struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* MAC learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* True if the datapath supports recirculation. */
    bool enable_recirc;

    /* True if the datapath supports variable-length
     * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
     * False if the datapath supports only 8-byte (or shorter) userdata. */
    bool variable_length_userdata;

    /* Number of MPLS label stack entries that the datapath supports
     * in matches. */
    size_t max_mpls_depth;

    /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
     * actions. */
    bool masked_set_action;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
    struct lldp *lldp;               /* LLDP handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Resubmit statistics, via xlate_table_action(). */
    int recurse;                /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */

    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    ovs_be64 rule_cookie;       /* Cookie of the rule being translated. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */

    /* These are used for non-bond recirculation.  The recirculation IDs are
     * stored in xout and must be associated with a datapath flow (ukey),
     * otherwise they will be freed when the xout is uninitialized.
     *
     * Steps in Recirculation Translation
     * ==================================
     *
     * At some point during translation, the code recognizes the need for
     * recirculation.  For example, recirculation is necessary when, after
     * popping the last MPLS label, an action or a match tries to examine or
     * modify a field that has been newly revealed following the MPLS label.
     *
     * The simplest part of the work to be done is to commit existing changes to
     * the packet, which produces datapath actions corresponding to the changes,
     * and after this, add an OVS_ACTION_ATTR_RECIRC datapath action.
     *
     * The main problem here is preserving state.  When the datapath executes
     * OVS_ACTION_ATTR_RECIRC, it will upcall to userspace to get a translation
     * for the post-recirculation actions.  At this point userspace has to
     * resume the translation where it left off, which means that it has to
     * execute the following:
     *
     *     - The action that prompted recirculation, and any actions following
     *       it within the same flow.
     *
     *     - If the action that prompted recirculation was invoked within a
     *       NXAST_RESUBMIT, then any actions following the resubmit.  These
     *       "resubmit"s can be nested, so this has to go all the way up the
     *       control stack.
     *
     *     - The OpenFlow 1.1+ action set.
     *
     * State that actions and flow table lookups can depend on, such as the
     * following, must also be preserved:
     *
     *     - Metadata fields (input port, registers, OF1.1+ metadata, ...).
     *
     *     - Action set, stack
     *
     *     - The table ID and cookie of the flow being translated at each level
     *       of the control stack (since OFPAT_CONTROLLER actions send these to
     *       the controller).
     *
     * Translation allows for the control of this state preservation via these
     * members.  When a need for recirculation is identified, the translation
     * process:
     *
     * 1. Sets 'recirc_action_offset' to the current size of 'action_set'.  The
     *    action set is part of what needs to be preserved, so this allows the
     *    action set and the additional state to share the 'action_set' buffer.
     *    Later steps can tell that setup for recirculation is in progress from
     *    the nonnegative value of 'recirc_action_offset'.
     *
     * 2. Sets 'exit' to true to tell later steps that we're exiting from the
     *    translation process.
     *
     * 3. Adds an OFPACT_UNROLL_XLATE action to 'action_set'.  This action
     *    holds the current table ID and cookie so that they can be restored
     *    during a post-recirculation upcall translation.
     *
     * 4. Adds the action that prompted recirculation and any actions following
     *    it within the same flow to 'action_set', so that they can be executed
     *    during a post-recirculation upcall translation.
     *
     * 5. Returns.
     *
     * 6. The action that prompted recirculation might be nested in a stack of
     *    nested "resubmit"s that have actions remaining.  Each of these notices
     *    that we're exiting (from 'exit') and that recirculation setup is in
     *    progress (from 'recirc_action_offset') and responds by adding more
     *    OFPACT_UNROLL_XLATE actions to 'action_set', as necessary, and any
     *    actions that were yet unprocessed.
     *
     * The caller stores all the state produced by this process associated with
     * the recirculation ID.  For post-recirculation upcall translation, the
     * caller passes it back in for the new translation to execute.  The
     * process yielded a set of ofpacts that can be translated directly, so it
     * is not much of a special case at that point.
     */
    int recirc_action_offset;   /* Offset in 'action_set' to actions to be
                                 * executed after recirculation, or -1. */
    int last_unroll_offset;     /* Offset in 'action_set' to the latest unroll
                                 * action. */

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */
    uint64_t action_set_stub[1024 / 8];
};

static void xlate_action_set(struct xlate_ctx *ctx);

static void
ctx_trigger_recirculation(struct xlate_ctx *ctx)
{
    ctx->exit = true;
    ctx->recirc_action_offset = ctx->action_set.size;
}

static bool
ctx_first_recirculation_action(const struct xlate_ctx *ctx)
{
    return ctx->recirc_action_offset == ctx->action_set.size;
}

static inline bool
exit_recirculates(const struct xlate_ctx *ctx)
{
    /* When recirculating, 'recirc_action_offset' has a non-negative value. */
    return ctx->recirc_action_offset >= 0;
}

static void compose_recirculate_action(struct xlate_ctx *ctx);
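
/* A minimal sketch (hypothetical caller; the real call sites appear in the
 * action translation code later in this file) of how the helpers above
 * cooperate:
 *
 *     if (translation_needs_recirculation) {   // hypothetical condition
 *         ctx_trigger_recirculation(ctx);      // record offset, set 'exit'
 *     }
 *     ...unwind through nested "resubmit"s, accumulating preserved state...
 *     if (exit_recirculates(ctx)) {
 *         compose_recirculate_action(ctx);     // emit OVS_ACTION_ATTR_RECIRC
 *     }
 */
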
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does it
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* xlate_cache entries hold enough information to perform the side effects of
 * xlate_actions() for a rule, without needing to perform rule translation
 * from scratch.  The primary usage of these is to submit statistics to objects
 * that a flow relates to, although they may be used for other effects as well
 * (for instance, refreshing hard timeouts for learned flows). */
struct xc_entry {
    enum xc_type type;
    union {
        struct rule_dpif *rule;
        struct {
            struct netflow *netflow;
            struct flow *flow;
            ofp_port_t iface;
        } nf;
        struct {
            struct mbridge *mbridge;
            mirror_mask_t mirrors;
        } mirror;
        struct {
            struct bond *bond;
            struct flow *flow;
            uint16_t vid;
        } bond;
        struct {
            struct ofproto_dpif *ofproto;
            struct ofputil_flow_mod *fm;
            struct ofpbuf *ofpacts;
        } learn;
        struct {
            struct ofproto_dpif *ofproto;
            struct flow *flow;
            int vlan;
        } normal;
        struct {
            struct rule_dpif *rule;
            uint16_t idle;
            uint16_t hard;
        } fin;
        struct {
            struct group_dpif *group;
            struct ofputil_bucket *bucket;
        } group;
        struct {
            char br_name[IFNAMSIZ];
        } tnl_arp_cache;
    } u;
};

#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
    entries = xcache->entries;                                  \
    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
         entry;                                                 \
         entry = ofpbuf_try_pull(&entries, sizeof *entry))
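
/* A minimal usage sketch for XC_ENTRY_FOR_EACH (variable names illustrative;
 * consumers of the cache elsewhere in this file follow the same shape):
 *
 *     struct xc_entry *entry;
 *     struct ofpbuf entries;
 *
 *     XC_ENTRY_FOR_EACH (entry, entries, xcache) {
 *         switch (entry->type) {
 *         case XC_RULE:
 *             // credit stats to entry->u.rule
 *             break;
 *         // ... other xc_type cases ...
 *         }
 *     }
 */
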
struct xlate_cache {
    struct ofpbuf entries;
};

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static inline void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);

/* Optional bond recirculation parameter to compose_output_action(). */
struct xlate_bond_recirc {
    uint32_t recirc_id;  /* !0 Use recirculation instead of output. */
    uint8_t  hash_alg;   /* !0 Compute hash for recirc before. */
    uint32_t hash_basis; /* Compute hash for recirc before. */
};

static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port,
                                  const struct xlate_bond_recirc *xr);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
                                              enum xc_type type);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              bool enable_recirc,
                              bool variable_length_userdata,
                              size_t max_mpls_depth,
                              bool masked_set_action);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode, int vlan,
                              unsigned long *trunks, bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, const struct lldp *lldp,
                            int stp_port_no, const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

static inline void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (OVS_UNLIKELY(ctx->xin->report_hook)) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}

static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    list_init(&xbundle->xports);
    list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  bool enable_recirc,
                  bool variable_length_userdata,
                  size_t max_mpls_depth,
                  bool masked_set_action)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->enable_recirc = enable_recirc;
    xbridge->variable_length_userdata = variable_length_userdata;
    xbridge->max_mpls_depth = max_mpls_depth;
    xbridge->masked_set_action = masked_set_action;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, int vlan,
                  unsigned long *trunks, bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, const struct lldp *lldp, int stp_port_no,
                const struct rstp_port *rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->lldp != lldp) {
        lldp_unref(xport->lldp);
        xport->lldp = lldp_ref(lldp);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu,
                      xbridge->has_in_band, xbridge->enable_recirc,
                      xbridge->variable_length_userdata,
                      xbridge->max_mpls_depth, xbridge->masked_set_action);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode,
                      xbundle->vlan, xbundle->trunks,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->lldp, xport->stp_port_no,
                    xport->rstp_port, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 * xlate_txn_start();
 * ...
 * edit_xlate_configuration();
 * ...
 * xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);

    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}

static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band, bool enable_recirc,
                  bool variable_length_userdata, size_t max_mpls_depth,
                  bool masked_set_action)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, enable_recirc,
                      variable_length_userdata, max_mpls_depth,
                      masked_set_action);
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, vlan, trunks,
                      use_priority_tags, bond, lacp, floodable);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 const struct lldp *lldp, struct ofport_dpif *peer,
                 int stp_port_no, const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, lldp,
                    stp_port_no, rstp_port, config, state, is_tunnel,
                    may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    lldp_unref(xport->lldp);
    free(xport);
}

void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}

static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
                      ofp_port_t *ofp_in_port, const struct xport **xportp)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    const struct xport *xport;

    xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));
    if (OVS_UNLIKELY(!xport)) {
        return NULL;
    }
    *xportp = xport;
    if (ofp_in_port) {
        *ofp_in_port = xport->ofp_port;
    }
    return xport->xbridge->ofproto;
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
                     ofp_port_t *ofp_in_port)
{
    const struct xport *xport;

    return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * optionally populates 'ofproto' with the ofproto_dpif, 'ofp_in_port' with the
 * OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 * handles for those protocols if they're enabled.  Caller may use the returned
 * pointers until quiescing; for longer term use, additional references must
 * be taken.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated
 * ofproto. */
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
             struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
             struct dpif_sflow **sflow, struct netflow **netflow,
             ofp_port_t *ofp_in_port)
{
    struct ofproto_dpif *ofproto;
    const struct xport *xport;

    ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);

    if (!ofproto) {
        return ENODEV;
    }

    if (ofprotop) {
        *ofprotop = ofproto;
    }

    if (ipfix) {
        *ipfix = xport ? xport->xbridge->ipfix : NULL;
    }

    if (sflow) {
        *sflow = xport ? xport->xbridge->sflow : NULL;
    }

    if (netflow) {
        *netflow = xport ? xport->xbridge->netflow : NULL;
    }

    return 0;
}

static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

*
1185 xbundle_lookup(struct xlate_cfg
*xcfg
, const struct ofbundle
*ofbundle
)
1187 struct hmap
*xbundles
;
1188 struct xbundle
*xbundle
;
1190 if (!ofbundle
|| !xcfg
) {
1194 xbundles
= &xcfg
->xbundles
;
1196 HMAP_FOR_EACH_IN_BUCKET (xbundle
, hmap_node
, hash_pointer(ofbundle
, 0),
1198 if (xbundle
->ofbundle
== ofbundle
) {
1205 static struct xport
*
1206 xport_lookup(struct xlate_cfg
*xcfg
, const struct ofport_dpif
*ofport
)
1208 struct hmap
*xports
;
1209 struct xport
*xport
;
1211 if (!ofport
|| !xcfg
) {
1215 xports
= &xcfg
->xports
;
1217 HMAP_FOR_EACH_IN_BUCKET (xport
, hmap_node
, hash_pointer(ofport
, 0),
1219 if (xport
->ofport
== ofport
) {
1226 static struct stp_port
*
1227 xport_get_stp_port(const struct xport
*xport
)
1229 return xport
->xbridge
->stp
&& xport
->stp_port_no
!= -1
1230 ? stp_get_port(xport
->xbridge
->stp
, xport
->stp_port_no
)
1235 xport_stp_learn_state(const struct xport
*xport
)
1237 struct stp_port
*sp
= xport_get_stp_port(xport
);
1239 ? stp_learn_in_state(stp_port_get_state(sp
))
1244 xport_stp_forward_state(const struct xport
*xport
)
1246 struct stp_port
*sp
= xport_get_stp_port(xport
);
1248 ? stp_forward_in_state(stp_port_get_state(sp
))
1253 xport_stp_should_forward_bpdu(const struct xport
*xport
)
1255 struct stp_port
*sp
= xport_get_stp_port(xport
);
1256 return stp_should_forward_bpdu(sp
? stp_port_get_state(sp
) : STP_DISABLED
);
/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return is_stp(flow);
}

static void
stp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, dp_packet_data(&payload),
                          dp_packet_size(&payload));
    }
}

static enum rstp_state
xport_get_rstp_port_state(const struct xport *xport)
{
    return xport->rstp_port
        ? rstp_port_get_state(xport->rstp_port)
        : RSTP_DISABLED;
}

static bool
xport_rstp_learn_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_forward_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_should_manage_bpdu(const struct xport *xport)
{
    return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
}

static void
rstp_process_packet(const struct xport *xport, const struct dp_packet *packet)
{
    struct dp_packet payload = *packet;
    struct eth_header *eth = dp_packet_data(&payload);

    /* Sink packets on ports that have no RSTP. */
    if (!xport->rstp_port) {
        return;
    }

    /* Trim off padding on payload. */
    if (dp_packet_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        dp_packet_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (dp_packet_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        rstp_port_received_bpdu(xport->rstp_port, dp_packet_data(&payload),
                                dp_packet_size(&payload));
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    return xport && xport->may_enable;
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                        int depth);

static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
    struct group_dpif *group;

    if (group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group)) {
        struct ofputil_bucket *bucket;

        bucket = group_first_live_bucket(ctx, group, depth);
        group_dpif_unref(group);
        return bucket != NULL;
    }

    return false;
}

#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */

static bool
bucket_is_alive(const struct xlate_ctx *ctx,
                struct ofputil_bucket *bucket, int depth)
{
    if (depth >= MAX_LIVENESS_RECURSION) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
                     MAX_LIVENESS_RECURSION);
        return false;
    }

    return (!ofputil_bucket_has_liveness(bucket)
            || (bucket->watch_port != OFPP_ANY
                && odp_port_is_alive(ctx, bucket->watch_port))
            || (bucket->watch_group != OFPG_ANY
                && group_is_alive(ctx, bucket->watch_group, depth + 1)));
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group, int depth)
{
    struct ofputil_bucket *bucket;
    const struct ovs_list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, depth)) {
            return bucket;
        }
    }

    return NULL;
}

static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
                       const struct group_dpif *group,
                       uint32_t basis)
{
    struct ofputil_bucket *best_bucket = NULL;
    uint32_t best_score = 0;
    int i = 0;

    struct ofputil_bucket *bucket;
    const struct ovs_list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
        i++;
    }

    return best_bucket;
}
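
/* The scoring in group_best_live_bucket() is a weighted form of rendezvous
 * ("highest random weight") hashing: each live bucket i draws a pseudorandom
 * 16-bit value from hash_int(i, basis), scales it by the bucket's weight, and
 * the highest score wins, biasing selection toward higher-weight buckets
 * while keeping the choice stable for a given 'basis'. */
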
static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}
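
/* For example (illustrative values): an access bundle with 'vlan' 10 includes
 * only VLAN 10; a trunk bundle with 'trunks' NULL includes every VLAN; a
 * trunk bundle with bit 20 set in 'trunks' includes VLAN 20. */
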
static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
     * which a controller may use as the ingress port for traffic that
     * it is sourcing. */
    if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }

        mirrors = zero_rightmost_1bit(mirrors);
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        OVS_NOT_REACHED();
    }
}
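
/* For example (illustrative values): on an access port with 'vlan' 10, any
 * admitted packet maps to VLAN 10; on a trunk, VID 20 maps to VLAN 20; on a
 * native-tagged or native-untagged port whose native VLAN is 7, VID 0 maps
 * to VLAN 7 while VID 20 stays VLAN 20. */
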
/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        OVS_NOT_REACHED();
    }
}
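
/* For example (illustrative values): output to an access port never carries
 * a VID (the function returns 0); a trunk or native-tagged port keeps the
 * VLAN as the VID; a native-untagged port whose native VLAN is 7 returns VID
 * 0 for VLAN 7 and the VLAN itself otherwise. */
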
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;
    struct xlate_bond_recirc xr;
    bool use_recirc = false;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
        struct flow_wildcards *wc = &ctx->xout->wc;
        struct ofport_dpif *ofport;

        if (ctx->xbridge->enable_recirc) {
            use_recirc = bond_may_recirc(
                out_xbundle->bond, &xr.recirc_id, &xr.hash_basis);

            if (use_recirc) {
                /* Only TCP mode uses recirculation. */
                xr.hash_alg = OVS_HASH_ALG_L4;
                bond_update_post_recirc_rules(out_xbundle->bond, false);

                /* Recirculation does not require unmasking hash fields. */
                wc->masks.dp_hash = 0;
            }
        }

        ofport = bond_choose_output_slave(out_xbundle->bond,
                                          &ctx->xin->flow, wc, vid);
        xport = xport_lookup(xcfg, ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }

        /* If use_recirc is set, the main thread will handle stats
         * accounting for this bond. */
        if (!use_recirc) {
            if (ctx->xin->resubmit_stats) {
                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
                             ctx->xin->resubmit_stats->n_bytes);
            }
            if (ctx->xin->xcache) {
                struct xc_entry *entry;
                struct flow *flow;

                flow = &ctx->xin->flow;
                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
                entry->u.bond.bond = bond_ref(out_xbundle->bond);
                entry->u.bond.flow = xmemdup(flow, sizeof *flow);
                entry->u.bond.vid = vid;
            }
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port, use_recirc ? &xr : NULL);
    *flow_tci = old_tci;
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case. */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac
                && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
                && (!is_gratuitous_arp(flow, &ctx->xout->wc)
                    || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}

/* Checks whether a MAC learning update is necessary for MAC learning table
 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
 * 'vlan'.
 *
 * Most packets processed through the MAC learning table do not actually
 * change it in any way.  This function requires only a read lock on the MAC
 * learning table, so it is much cheaper in this common case.
 *
 * Keep the code here synchronized with that in update_learning_table__()
 * below. */
static bool
is_mac_learning_update_needed(const struct mac_learning *ml,
                              const struct flow *flow,
                              struct flow_wildcards *wc,
                              int vlan, struct xbundle *in_xbundle)
OVS_REQ_RDLOCK(ml->rwlock)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(ml, flow->dl_src, vlan)) {
        return false;
    }

    mac = mac_learning_lookup(ml, flow->dl_src, vlan);
    if (!mac || mac_entry_age(ml, mac)) {
        return true;
    }

    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            return true;
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return false;
        }
    }

    return mac_entry_get_port(ml, mac) != in_xbundle->ofbundle;
}

/* Updates MAC learning table 'ml' given that a packet matching 'flow' was
 * received on 'in_xbundle' in 'vlan'.
 *
 * This code repeats all the checks in is_mac_learning_update_needed() because
 * the lock was released between there and here and thus the MAC learning state
 * could have changed.
 *
 * Keep the code here synchronized with that in is_mac_learning_update_needed()
 * above. */
static void
update_learning_table__(const struct xbridge *xbridge,
                        const struct flow *flow, struct flow_wildcards *wc,
                        int vlan, struct xbundle *in_xbundle)
OVS_REQ_WRLOCK(xbridge->ml->rwlock)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
                    in_xbundle->name, vlan);

        mac_entry_set_port(xbridge->ml, mac, in_xbundle->ofbundle);
    }
}

static void
update_learning_table(const struct xbridge *xbridge,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct xbundle *in_xbundle)
{
    bool need_update;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* First try the common case: no change to MAC learning table. */
    ovs_rwlock_rdlock(&xbridge->ml->rwlock);
    need_update = is_mac_learning_update_needed(xbridge->ml, flow, wc, vlan,
                                                in_xbundle);
    ovs_rwlock_unlock(&xbridge->ml->rwlock);

    if (need_update) {
        /* Slow path: MAC learning table might need an update. */
        ovs_rwlock_wrlock(&xbridge->ml->rwlock);
        update_learning_table__(xbridge, flow, wc, vlan, in_xbundle);
        ovs_rwlock_unlock(&xbridge->ml->rwlock);
    }
}

/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
update_mcast_snooping_table__(const struct xbridge *xbridge,
                              const struct flow *flow,
                              struct mcast_snooping *ms,
                              ovs_be32 ip4, int vlan,
                              struct xbundle *in_xbundle)
OVS_REQ_WRLOCK(ms->rwlock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);

    switch (ntohs(flow->tp_src)) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        if (mcast_snooping_add_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping learned that "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        if (mcast_snooping_leave_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping leaving "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
        }
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
                                                       in_xbundle->ofbundle)) {
            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping query from "
                        IP_FMT" is on port %s in VLAN %d",
                        xbridge->name, IP_ARGS(flow->nw_src),
                        in_xbundle->name, vlan);
        }
        break;
    }
}

/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 * was received on 'in_xbundle' in 'vlan'. */
static void
update_mcast_snooping_table(const struct xbridge *xbridge,
                            const struct flow *flow, int vlan,
                            struct xbundle *in_xbundle)
{
    struct mcast_snooping *ms = xbridge->ms;
    struct xlate_cfg *xcfg;
    struct xbundle *mcast_xbundle;
    struct mcast_port_bundle *fport;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* Don't learn from flood ports */
    mcast_xbundle = NULL;
    ovs_rwlock_wrlock(&ms->rwlock);
    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle == in_xbundle) {
            break;
        }
    }

    if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
        update_mcast_snooping_table__(xbridge, flow, ms, flow->igmp_group_ip4,
                                      vlan, in_xbundle);
    }
    ovs_rwlock_unlock(&ms->rwlock);
}
/* send the packet to ports having the multicast group learned */
static void
xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
                              struct mcast_snooping *ms OVS_UNUSED,
                              struct mcast_group *grp,
                              struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_group_bundle *b;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, b->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast group port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast group port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast group port is input port, dropping");
        }
    }
}
/* send the packet to ports connected to multicast routers */
static void
xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
                                 struct mcast_snooping *ms,
                                 struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_mrouter_bundle *mrouter;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
        mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast router port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast router port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast router port is input port, dropping");
        }
    }
}
/* send the packet to ports flagged to be flooded */
static void
xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *fport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(fport, node, &ms->fport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, fport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to mcast flood port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast flood port is unknown, dropping");
        } else {
            xlate_report(ctx, "mcast flood port is input port, dropping");
        }
    }
}
/* forward the Reports to configured ports */
static void
xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
                               struct mcast_snooping *ms,
                               struct xbundle *in_xbundle, uint16_t vlan)
    OVS_REQ_RDLOCK(ms->rwlock)
{
    struct xlate_cfg *xcfg;
    struct mcast_port_bundle *rport;
    struct xbundle *mcast_xbundle;

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    LIST_FOR_EACH(rport, node, &ms->rport_list) {
        mcast_xbundle = xbundle_lookup(xcfg, rport->port);
        if (mcast_xbundle && mcast_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding Report to mcast flagged port");
            output_normal(ctx, mcast_xbundle, vlan);
        } else if (!mcast_xbundle) {
            xlate_report(ctx, "mcast port is unknown, dropping the Report");
        } else {
            xlate_report(ctx, "mcast port is input port, dropping the Report");
        }
    }
}
static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
                   uint16_t vlan)
{
    struct xbundle *xbundle;

    LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
        if (xbundle != in_xbundle
            && xbundle_includes_vlan(xbundle, vlan)
            && xbundle->floodable
            && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
            output_normal(ctx, xbundle, vlan);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    void *mac_port;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        /* Save enough info to update mac learning table later. */
        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
        entry->u.normal.ofproto = ctx->xbridge->ofproto;
        entry->u.normal.flow = xmemdup(flow, sizeof *flow);
        entry->u.normal.vlan = vlan;
    }

    /* Determine output bundle. */
    if (mcast_snooping_enabled(ctx->xbridge->ms)
        && !eth_addr_is_broadcast(flow->dl_dst)
        && eth_addr_is_multicast(flow->dl_dst)
        && flow->dl_type == htons(ETH_TYPE_IP)) {
        struct mcast_snooping *ms = ctx->xbridge->ms;
        struct mcast_group *grp;

        if (flow->nw_proto == IPPROTO_IGMP) {
            if (ctx->xin->may_learn) {
                if (mcast_snooping_is_membership(flow->tp_src) ||
                    mcast_snooping_is_query(flow->tp_src)) {
                    update_mcast_snooping_table(ctx->xbridge, flow, vlan,
                                                in_xbundle);
                }
            }

            if (mcast_snooping_is_membership(flow->tp_src)) {
                ovs_rwlock_rdlock(&ms->rwlock);
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
                /* RFC4541: section 2.1.1, item 1: A snooping switch should
                 * forward IGMP Membership Reports only to those ports where
                 * multicast routers are attached.  Alternatively stated: a
                 * snooping switch should not forward IGMP Membership Reports
                 * to ports on which only hosts are attached.
                 * An administrative control may be provided to override this
                 * restriction, allowing the report messages to be flooded to
                 * other ports. */
                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
                ovs_rwlock_unlock(&ms->rwlock);
            } else {
                xlate_report(ctx, "multicast traffic, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
            }
            return;
        } else {
            if (ip_is_local_multicast(flow->nw_dst)) {
                /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
                 * address in the 224.0.0.x range which are not IGMP must
                 * be forwarded on all ports */
                xlate_report(ctx, "RFC4541: section 2.1.2, item 2, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
                return;
            }
        }

        /* forwarding to group base ports */
        ovs_rwlock_rdlock(&ms->rwlock);
        grp = mcast_snooping_lookup(ms, flow->nw_dst, vlan);
        if (grp) {
            xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
            xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
            xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
        } else {
            if (mcast_snooping_flood_unreg(ms)) {
                xlate_report(ctx, "unregistered multicast, flooding");
                xlate_normal_flood(ctx, in_xbundle, vlan);
            } else {
                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
                xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
            }
        }
        ovs_rwlock_unlock(&ms->rwlock);
    } else {
        ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
        mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
        mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
        ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

        if (mac_port) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
            if (mac_xbundle && mac_xbundle != in_xbundle) {
                xlate_report(ctx, "forwarding to learned port");
                output_normal(ctx, mac_xbundle, vlan);
            } else if (!mac_xbundle) {
                xlate_report(ctx, "learned port is unknown, dropping");
            } else {
                xlate_report(ctx, "learned port is input port, dropping");
            }
        } else {
            xlate_report(ctx, "no learned MAC for destination, flooding");
            xlate_normal_flood(ctx, in_xbundle, vlan);
        }
    }
}
/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet.
 */
static size_t
compose_sample_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size,
                      const odp_port_t tunnel_out_port)
{
    size_t sample_offset, actions_offset;
    odp_port_t odp_port;
    int cookie_offset;
    uint32_t pid;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);

    odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
    pid = dpif_port_get_pid(xbridge->dpif, odp_port,
                            flow_hash_5tuple(flow, 0));
    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
                                             tunnel_out_port, odp_actions);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}
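
/* For reference, the nesting of Netlink attributes emitted above looks
 * roughly like this (a sketch of the attribute layout, not literal code):
 *
 *   OVS_ACTION_ATTR_SAMPLE
 *       OVS_SAMPLE_ATTR_PROBABILITY   (u32; packets out of UINT32_MAX)
 *       OVS_SAMPLE_ATTR_ACTIONS
 *           OVS_ACTION_ATTR_USERSPACE (pid + user-cookie)
 *
 * The returned offset locates the user-cookie inside the nested USERSPACE
 * action so that fix_sflow_action() below can rewrite it in place once the
 * final output information is known. */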
static void
compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
                     odp_port_t odp_port, unsigned int n_outputs,
                     union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}
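
/* Worked examples of the 'output' encoding chosen above (the sFlow v5 spec
 * referenced in the comment is authoritative; the ifindex value here is
 * hypothetical):
 *
 *     n_outputs == 0                 -> 0x40000000 | 256  (dropped)
 *     n_outputs == 1, ifindex == 7   -> 7                 (single port)
 *     n_outputs == 1, no ifindex     -> 0x80000000 | 1    (fall through)
 *     n_outputs == 3                 -> 0x80000000 | 3    (multiple ports)
 */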
/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(xbridge->sflow);
    compose_sflow_cookie(xbridge, htons(0), odp_port,
                         odp_port == ODPP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(xbridge, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow, ODPP_NONE);
}
static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}
static void
compose_ipfix_cookie(union user_action_cookie *cookie,
                     odp_port_t output_odp_port)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
    cookie->ipfix.output_odp_port = output_odp_port;
}
/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t output_odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;
    odp_port_t tunnel_out_port = ODPP_NONE;

    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
        return;
    }

    /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
     * number. */
    if (output_odp_port == ODPP_NONE &&
        !dpif_ipfix_get_bridge_exporter_input_sampling(xbridge->ipfix)) {
        return;
    }

    /* For output case, output_odp_port is valid. */
    if (output_odp_port != ODPP_NONE) {
        if (!dpif_ipfix_get_bridge_exporter_output_sampling(xbridge->ipfix)) {
            return;
        }
        /* If tunnel sampling is enabled, put an additional option attribute:
         * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
         */
        if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(xbridge->ipfix) &&
            dpif_ipfix_get_tunnel_port(xbridge->ipfix, output_odp_port)) {
            tunnel_out_port = output_odp_port;
        }
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
    compose_ipfix_cookie(&cookie, output_odp_port);

    compose_sample_action(xbridge, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix, tunnel_out_port);
}
/* SAMPLE action for sFlow must be first action in any given list of
 * actions.  At this point we do not have all information required to
 * build it.  So try to build sample action as complete as possible. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                   ctx->xout->odp_actions,
                                                   &ctx->xin->flow, ODPP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}
/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, eventually after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
                         &ctx->xin->flow, ODPP_NONE);
}
static void
add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
{
    compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
                         &ctx->xin->flow, port);
}
/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
 * USERSPACE action's user-cookie which is required for sflow. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
static enum slow_path_reason
process_special(struct xlate_ctx *ctx, const struct flow *flow,
                const struct xport *xport, const struct dp_packet *packet)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct xbridge *xbridge = ctx->xbridge;

    if (!xport) {
        return 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
            /* If POLL received, immediately sends FINAL back. */
            if (bfd_should_send_packet(xport->bfd)) {
                ofproto_dpif_monitor_port_send_soon(xport->ofport);
            }
        }
        return SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if ((xbridge->stp || xbridge->rstp) &&
               stp_should_process_flow(flow, wc)) {
        if (packet) {
            xbridge->stp
                ? stp_process_packet(xport, packet)
                : rstp_process_packet(xport, packet);
        }
        return SLOW_STP;
    } else if (xport->lldp && lldp_should_process_flow(flow)) {
        if (packet) {
            lldp_process_packet(xport->lldp, packet);
        }
        return SLOW_LLDP;
    } else {
        return 0;
    }
}
static int
tnl_route_lookup_flow(const struct flow *oflow,
                      ovs_be32 *ip, struct xport **out_port)
{
    char out_dev[IFNAMSIZ];
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;
    ovs_be32 gw;

    if (!ovs_router_lookup(oflow->tunnel.ip_dst, out_dev, &gw)) {
        return -ENOENT;
    }

    if (gw) {
        *ip = gw;
    } else {
        *ip = oflow->tunnel.ip_dst;
    }

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    ovs_assert(xcfg);

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
            struct xport *port;

            HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
                if (!strncmp(netdev_get_name(port->netdev), out_dev,
                             IFNAMSIZ)) {
                    *out_port = port;
                    return 0;
                }
            }
        }
    }
    return -ENOENT;
}
static int
xlate_flood_packet(struct xbridge *xbridge, struct dp_packet *packet)
{
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
    flow_extract(packet, &flow);
    flow.in_port.ofp_port = OFPP_NONE;
    output.port = OFPP_FLOOD;
    output.max_len = 0;

    return ofproto_dpif_execute_actions(xbridge->ofproto, &flow, NULL,
                                        &output.ofpact, sizeof output,
                                        packet);
}
static void
tnl_send_arp_request(const struct xport *out_dev,
                     const uint8_t eth_src[ETH_ADDR_LEN],
                     ovs_be32 ip_src, ovs_be32 ip_dst)
{
    struct xbridge *xbridge = out_dev->xbridge;
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    compose_arp(&packet, eth_src, ip_src, ip_dst);

    xlate_flood_packet(xbridge, &packet);
    dp_packet_uninit(&packet);
}
static int
build_tunnel_send(const struct xlate_ctx *ctx, const struct xport *xport,
                  const struct flow *flow, odp_port_t tunnel_odp_port)
{
    struct ovs_action_push_tnl tnl_push_data;
    struct xport *out_dev = NULL;
    ovs_be32 s_ip, d_ip = 0;
    uint8_t smac[ETH_ADDR_LEN];
    uint8_t dmac[ETH_ADDR_LEN];
    int err;

    err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
    if (err) {
        return err;
    }

    /* Use mac addr of bridge port of the peer. */
    err = netdev_get_etheraddr(out_dev->netdev, smac);
    if (err) {
        return err;
    }

    err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
    if (err) {
        return err;
    }

    err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
    if (err) {
        tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
        return err;
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_ARP);
        ovs_strlcpy(entry->u.tnl_arp_cache.br_name, out_dev->xbridge->name,
                    sizeof entry->u.tnl_arp_cache.br_name);
        entry->u.tnl_arp_cache.d_ip = d_ip;
    }
    err = tnl_port_build_header(xport->ofport, flow,
                                dmac, smac, s_ip, &tnl_push_data);
    if (err) {
        return err;
    }
    tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
    tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
    odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
    return 0;
}
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        const struct xlate_bond_recirc *xr, bool check_stp)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct flow_tnl flow_tnl;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_pkt_mark;
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port;
    bool tnl_push_pop_send = false;
    uint8_t dscp;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
    memset(&flow_tnl, 0, sizeof flow_tnl);

    if (!xport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp) {
        if (is_stp(&ctx->base_flow)) {
            if (!xport_stp_should_forward_bpdu(xport) &&
                !xport_rstp_should_manage_bpdu(xport)) {
                if (ctx->xbridge->stp != NULL) {
                    xlate_report(ctx, "STP not in listening state, "
                                 "skipping bpdu output");
                } else if (ctx->xbridge->rstp != NULL) {
                    xlate_report(ctx, "RSTP not managing BPDU in this state, "
                                 "skipping bpdu output");
                }
                return;
            }
        } else if (!xport_stp_forward_state(xport) ||
                   !xport_rstp_forward_state(xport)) {
            if (ctx->xbridge->stp != NULL) {
                xlate_report(ctx, "STP not in forwarding state, "
                             "skipping output");
            } else if (ctx->xbridge->rstp != NULL) {
                xlate_report(ctx, "RSTP not in forwarding state, "
                             "skipping output");
            }
            return;
        }
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
                                                 xport->xbundle);
    }

    if (xport->peer) {
        const struct xport *peer = xport->peer;
        struct flow old_flow = ctx->xin->flow;
        bool old_was_mpls = ctx->was_mpls;
        enum slow_path_reason special;
        struct ofpbuf old_stack = ctx->stack;
        union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
        struct ofpbuf old_action_set = ctx->action_set;
        uint64_t actset_stub[1024 / 8];

        ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
        ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
        ctx->xbridge = peer->xbridge;
        flow->in_port.ofp_port = peer->ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);
        flow->actset_output = OFPP_UNSET;

        special = process_special(ctx, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow |= special;
        } else if (may_receive(peer, ctx)) {
            if (xport_stp_forward_state(peer)
                && xport_rstp_forward_state(peer)) {
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                if (ctx->action_set.size) {
                    /* Translate action set only if not dropping the packet and
                     * not recirculating. */
                    if (!exit_recirculates(ctx)) {
                        xlate_action_set(ctx);
                    }
                }
                /* Check if need to recirculate. */
                if (exit_recirculates(ctx)) {
                    compose_recirculate_action(ctx);
                }
            } else {
                /* Forwarding is disabled by STP and RSTP.  Let OFPP_NORMAL and
                 * the learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions->size;
                mirror_mask_t old_mirrors = ctx->xout->mirrors;

                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                ctx->xout->mirrors = old_mirrors;
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions->size = old_size;

                /* Undo changes that may have been done for recirculation. */
                if (exit_recirculates(ctx)) {
                    ctx->action_set.size = ctx->recirc_action_offset;
                    ctx->recirc_action_offset = -1;
                    ctx->last_unroll_offset = -1;
                }
            }
        }

        ctx->xin->flow = old_flow;
        ctx->xbridge = xport->xbridge;
        ofpbuf_uninit(&ctx->action_set);
        ctx->action_set = old_action_set;
        ofpbuf_uninit(&ctx->stack);
        ctx->stack = old_stack;

        /* The peer bridge popping MPLS should have no effect on the original
         * bridge. */
        ctx->was_mpls = old_was_mpls;

        /* The fact that the peer bridge exits (for any reason) does not mean
         * that the original bridge should exit.  Specifically, if the peer
         * bridge recirculates (which typically modifies the packet), the
         * original bridge must continue processing with the original, not the
         * recirculated packet! */
        ctx->exit = false;

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
            if (peer->bfd) {
                bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
            }
        }
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
            entry->u.dev.tx = netdev_ref(xport->netdev);
            entry->u.dev.rx = netdev_ref(peer->netdev);
            entry->u.dev.bfd = bfd_ref(peer->bfd);
        }
        return;
    }

    flow_vlan_tci = flow->vlan_tci;
    flow_pkt_mark = flow->pkt_mark;
    flow_nw_tos = flow->nw_tos;

    if (count_skb_priorities(xport)) {
        memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
        if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
            wc->masks.nw_tos |= IP_DSCP_MASK;
            flow->nw_tos &= ~IP_DSCP_MASK;
            flow->nw_tos |= dscp;
        }
    }

    if (xport->is_tunnel) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
            entry->u.dev.tx = netdev_ref(xport->netdev);
        }
        out_port = odp_port;
        if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
            tnl_push_pop_send = true;
        } else {
            commit_odp_tunnel_action(flow, &ctx->base_flow,
                                     ctx->xout->odp_actions);
            flow->tunnel = flow_tnl; /* Restore tunnel metadata */
        }
    } else {
        odp_port = xport->odp_port;
        out_port = odp_port;
        if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
            ofp_port_t vlandev_port;

            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
            vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
                                                  ofp_port, flow->vlan_tci);
            if (vlandev_port != ofp_port) {
                out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
                flow->vlan_tci = htons(0);
            }
        }
    }

    if (out_port != ODPP_NONE) {
        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
                                              ctx->xout->odp_actions,
                                              wc,
                                              ctx->xbridge->masked_set_action);

        if (xr) {
            struct ovs_action_hash *act_hash;

            /* Hash action. */
            act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions,
                                                OVS_ACTION_ATTR_HASH,
                                                sizeof *act_hash);
            act_hash->hash_alg = xr->hash_alg;
            act_hash->hash_basis = xr->hash_basis;

            /* Recirc action. */
            nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
                           xr->recirc_id);
        } else {
            if (tnl_push_pop_send) {
                build_tunnel_send(ctx, xport, flow, odp_port);
                flow->tunnel = flow_tnl; /* Restore tunnel metadata */
            } else {
                odp_port_t odp_tnl_port = ODPP_NONE;

                /* XXX: Write better Filter for tunnel port. We can use inport
                 * int tunnel-port flow to avoid these checks completely. */
                if (ofp_port == OFPP_LOCAL &&
                    ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
                    odp_tnl_port = tnl_port_map_lookup(flow, wc);
                }

                if (odp_tnl_port != ODPP_NONE) {
                    nl_msg_put_odp_port(ctx->xout->odp_actions,
                                        OVS_ACTION_ATTR_TUNNEL_POP,
                                        odp_tnl_port);
                } else {
                    /* Tunnel push-pop action is not compatible with
                     * IPFIX action. */
                    add_ipfix_output_action(ctx, out_port);
                    nl_msg_put_odp_port(ctx->xout->odp_actions,
                                        OVS_ACTION_ATTR_OUTPUT,
                                        out_port);
                }
            }
        }

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->xout->nf_output_iface = ofp_port;
    }

 out:
    /* Restore flow */
    flow->vlan_tci = flow_vlan_tci;
    flow->pkt_mark = flow_pkt_mark;
    flow->nw_tos = flow_nw_tos;
}
static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                      const struct xlate_bond_recirc *xr)
{
    compose_output_action__(ctx, ofp_port, xr, true);
}
static void
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct rule_dpif *old_rule = ctx->rule;
    ovs_be64 old_cookie = ctx->rule_cookie;
    const struct rule_actions *actions;

    if (ctx->xin->resubmit_stats) {
        rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
    }

    ctx->resubmits++;
    ctx->recurse++;
    ctx->rule = rule;
    ctx->rule_cookie = rule_dpif_get_flow_cookie(rule);
    actions = rule_dpif_get_actions(rule);
    do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
    ctx->rule_cookie = old_cookie;
    ctx->rule = old_rule;
    ctx->recurse--;
}
static bool
xlate_resubmit_resource_check(struct xlate_ctx *ctx)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

    if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
        VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
    } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
        VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
    } else if (ctx->xout->odp_actions->size > UINT16_MAX) {
        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
    } else if (ctx->stack.size >= 65536) {
        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
    } else {
        return true;
    }

    return false;
}
static void
xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                   bool may_packet_in, bool honor_table_miss)
{
    /* Check if we need to recirculate before matching in a table. */
    if (ctx->was_mpls) {
        ctx_trigger_recirculation(ctx);
        return;
    }
    if (xlate_resubmit_resource_check(ctx)) {
        struct flow_wildcards *wc;
        uint8_t old_table_id = ctx->table_id;
        struct rule_dpif *rule;

        ctx->table_id = table_id;
        wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;

        rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
                                           &ctx->xin->flow, wc,
                                           ctx->xin->xcache != NULL,
                                           ctx->xin->resubmit_stats,
                                           &ctx->table_id, in_port,
                                           may_packet_in, honor_table_miss);

        if (OVS_UNLIKELY(ctx->xin->resubmit_hook)) {
            ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse + 1);
        }

        if (rule) {
            /* Fill in the cache entry here instead of xlate_recursively
             * to make the reference counting more explicit.  We take a
             * reference in the lookups above if we are going to cache the
             * rule. */
            if (ctx->xin->xcache) {
                struct xc_entry *entry;

                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
                entry->u.rule = rule;
            }
            xlate_recursively(ctx, rule);
        }

        ctx->table_id = old_table_id;
        return;
    }

    ctx->exit = true;
}
static void
xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
                  struct ofputil_bucket *bucket)
{
    if (ctx->xin->resubmit_stats) {
        group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
    }
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
        entry->u.group.group = group_dpif_ref(group);
        entry->u.group.bucket = bucket;
    }
}
static void
xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
{
    uint64_t action_list_stub[1024 / 8];
    struct ofpbuf action_list, action_set;
    struct flow old_flow = ctx->xin->flow;
    bool old_was_mpls = ctx->was_mpls;

    ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);

    ofpacts_execute_action_set(&action_list, &action_set);
    ctx->recurse++;
    do_xlate_actions(action_list.data, action_list.size, ctx);
    ctx->recurse--;

    ofpbuf_uninit(&action_set);
    ofpbuf_uninit(&action_list);

    /* Check if need to recirculate. */
    if (exit_recirculates(ctx)) {
        compose_recirculate_action(ctx);
    }

    /* Roll back flow to previous state.
     * This is equivalent to cloning the packet for each bucket.
     *
     * As a side effect any subsequently applied actions will
     * also effectively be applied to a clone of the packet taken
     * just before applying the all or indirect group.
     *
     * Note that group buckets are action sets, hence they cannot modify the
     * main action set.  Also any stack actions are ignored when executing an
     * action set, so group buckets cannot change the stack either.
     * However, we do allow resubmit actions in group buckets, which could
     * break the above assumptions.  It is up to the controller to not mess up
     * with the action_set and stack in the tables resubmitted to from
     * group buckets. */
    ctx->xin->flow = old_flow;

    /* The group bucket popping MPLS should have no effect after bucket
     * execution. */
    ctx->was_mpls = old_was_mpls;

    /* The fact that the group bucket exits (for any reason) does not mean that
     * the translation after the group action should exit.  Specifically, if
     * the group bucket recirculates (which typically modifies the packet), the
     * actions after the group action must continue processing with the
     * original, not the recirculated packet! */
    ctx->exit = false;
}
static void
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    struct ofputil_bucket *bucket;
    const struct ovs_list *buckets;

    group_dpif_get_buckets(group, &buckets);

    LIST_FOR_EACH (bucket, list_node, buckets) {
        xlate_group_bucket(ctx, bucket);
    }
    xlate_group_stats(ctx, group, NULL);
}
static void
xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    struct ofputil_bucket *bucket;

    bucket = group_first_live_bucket(ctx, group, 0);
    if (bucket) {
        xlate_group_bucket(ctx, bucket);
        xlate_group_stats(ctx, group, bucket);
    }
}
static void
xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct ofputil_bucket *bucket;
    uint32_t basis;

    basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
    flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
    bucket = group_best_live_bucket(ctx, group, basis);
    if (bucket) {
        xlate_group_bucket(ctx, bucket);
        xlate_group_stats(ctx, group, bucket);
    }
}
static void
xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    struct mf_bitmap hash_fields = MF_BITMAP_INITIALIZER;
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct field_array *fields;
    struct ofputil_bucket *bucket;
    uint32_t basis;
    int i;

    fields = group_dpif_get_fields(group);
    basis = hash_uint64(group_dpif_get_selection_method_param(group));

    /* Determine which fields to hash */
    for (i = 0; i < MFF_N_IDS; i++) {
        if (bitmap_is_set(fields->used.bm, i)) {
            const struct mf_field *mf;

            /* If the field is already present in 'hash_fields' then
             * this loop has already checked that it and its pre-requisites
             * are present in the flow and its pre-requisites have
             * already been added to 'hash_fields'.  There is nothing more
             * to do here and as an optimisation the loop can continue. */
            if (bitmap_is_set(hash_fields.bm, i)) {
                continue;
            }

            mf = mf_from_id(i);

            /* Only hash a field if it and its pre-requisites are present
             * in the flow. */
            if (!mf_are_prereqs_ok(mf, &ctx->xin->flow)) {
                continue;
            }

            /* Hash both the field and its pre-requisites */
            mf_bitmap_set_field_and_prereqs(mf, &hash_fields);
        }
    }

    /* Hash the fields */
    for (i = 0; i < MFF_N_IDS; i++) {
        if (bitmap_is_set(hash_fields.bm, i)) {
            const struct mf_field *mf = mf_from_id(i);
            union mf_value value;
            int j;

            mf_get_value(mf, &ctx->xin->flow, &value);
            /* This seems inefficient but so does apply_mask() */
            for (j = 0; j < mf->n_bytes; j++) {
                ((uint8_t *) &value)[j] &= ((uint8_t *) &fields->value[i])[j];
            }
            basis = hash_bytes(&value, mf->n_bytes, basis);

            mf_mask_field(mf, &wc->masks);
        }
    }

    bucket = group_best_live_bucket(ctx, group, basis);
    if (bucket) {
        xlate_group_bucket(ctx, bucket);
        xlate_group_stats(ctx, group, bucket);
    }
}
static void
xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    const char *selection_method = group_dpif_get_selection_method(group);

    if (selection_method[0] == '\0') {
        xlate_default_select_group(ctx, group);
    } else if (!strcasecmp("hash", selection_method)) {
        xlate_hash_fields_select_group(ctx, group);
    } else {
        /* Parsing of groups should ensure this never happens */
        OVS_NOT_REACHED();
    }
}
static void
xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
{
    ctx->in_group = true;

    switch (group_dpif_get_type(group)) {
    case OFPGT11_ALL:
    case OFPGT11_INDIRECT:
        xlate_all_group(ctx, group);
        break;
    case OFPGT11_SELECT:
        xlate_select_group(ctx, group);
        break;
    case OFPGT11_FF:
        xlate_ff_group(ctx, group);
        break;
    default:
        OVS_NOT_REACHED();
    }
    group_dpif_unref(group);

    ctx->in_group = false;
}
static bool
xlate_group_resource_check(struct xlate_ctx *ctx)
{
    if (!xlate_resubmit_resource_check(ctx)) {
        return false;
    } else if (ctx->in_group) {
        /* Prevent nested translation of OpenFlow groups.
         *
         * OpenFlow allows this restriction.  We enforce this restriction only
         * because, with the current architecture, we would otherwise have to
         * take a possibly recursive read lock on the ofgroup rwlock, which is
         * unsafe given that POSIX allows taking a read lock to block if there
         * is a thread blocked on taking the write lock.  Other solutions
         * without this restriction are also possible, but seem unwarranted
         * given the current limited use of groups. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
        return false;
    } else {
        return true;
    }
}
static bool
xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
{
    if (xlate_group_resource_check(ctx)) {
        struct group_dpif *group;
        bool got_group;

        got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
        if (got_group) {
            xlate_group_action__(ctx, group);
        } else {
            return true;
        }
    }

    return false;
}
static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    ofp_port_t in_port;
    uint8_t table_id;
    bool may_packet_in = false;
    bool honor_table_miss = false;

    if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
        /* Still allow missed packets to be sent to the controller
         * if resubmitting from an internal table. */
        may_packet_in = true;
        honor_table_miss = true;
    }

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port.ofp_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, may_packet_in,
                       honor_table_miss);
}
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    const struct xport *xport;

    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, xport->ofp_port, NULL, false);
        } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, xport->ofp_port, NULL);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofproto_packet_in *pin;
    struct dp_packet *packet;

    ctx->xout->slow |= SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = dp_packet_clone(ctx->xin->packet);

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          ctx->xbridge->masked_set_action);

    odp_execute_actions(NULL, &packet, 1, false,
                        ctx->xout->odp_actions->data,
                        ctx->xout->odp_actions->size, NULL);

    pin = xmalloc(sizeof *pin);
    pin->up.packet_len = dp_packet_size(packet);
    pin->up.packet = dp_packet_steal_data(packet);
    pin->up.reason = reason;
    pin->up.table_id = ctx->table_id;
    pin->up.cookie = ctx->rule_cookie;

    flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);

    pin->controller_id = controller_id;
    pin->send_len = len;
    /* If a rule is a table-miss rule then this is
     * a table-miss handled by a table-miss rule.
     *
     * Else, if rule is internal and has a controller action,
     * the latter being implied by the rule being processed here,
     * then this is a table-miss handled without a table-miss rule.
     *
     * Otherwise this is not a table-miss. */
    pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
    if (ctx->rule) {
        if (rule_dpif_is_table_miss(ctx->rule)) {
            pin->miss_type = OFPROTO_PACKET_IN_MISS_FLOW;
        } else if (rule_dpif_is_internal(ctx->rule)) {
            pin->miss_type = OFPROTO_PACKET_IN_MISS_WITHOUT_FLOW;
        }
    }
    ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
    dp_packet_delete(packet);
}
/* Called only when ctx->recirc_action_offset is set. */
static void
compose_recirculate_action(struct xlate_ctx *ctx)
{
    struct recirc_metadata md;
    uint32_t id;

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          ctx->xbridge->masked_set_action);

    recirc_metadata_from_flow(&md, &ctx->xin->flow);

    ovs_assert(ctx->recirc_action_offset >= 0);

    /* Only allocate recirculation ID if we have a packet. */
    if (ctx->xin->packet) {
        /* Allocate a unique recirc id for the given metadata state in the
         * flow.  The life-cycle of this recirc id is managed by associating it
         * with the udpif key ('ukey') created for each new datapath flow. */
        id = recirc_alloc_id_ctx(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
                                 ctx->recirc_action_offset,
                                 ctx->action_set.size, ctx->action_set.data);
        if (!id) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
            return;
        }
        xlate_out_add_recirc(ctx->xout, id);
    } else {
        /* Look up an existing recirc id for the given metadata state in the
         * flow.  No new reference is taken, as the ID is RCU protected and is
         * only required temporarily for verification. */
        id = recirc_find_id(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
                            ctx->recirc_action_offset,
                            ctx->action_set.size, ctx->action_set.data);
        /* We let zero 'id' to be used in the RECIRC action below, which will
         * fail all revalidations as zero is not a valid recirculation ID. */
    }

    nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);

    /* Undo changes done by recirculation. */
    ctx->action_set.size = ctx->recirc_action_offset;
    ctx->recirc_action_offset = -1;
    ctx->last_unroll_offset = -1;
}
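
/* After the function above runs, the datapath action list ends with the
 * committed flow changes followed by a recirculation, e.g. (illustrative
 * datapath flow syntax, with a made-up id): "...,recirc(0x36)".  When the
 * packet comes back from the datapath with that id, translation resumes
 * from the saved action set and stack rather than starting over. */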
static void
compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    int n;

    ovs_assert(eth_type_mpls(mpls->ethertype));

    n = flow_count_mpls_labels(flow, wc);
    if (!n) {
        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
                                              ctx->xout->odp_actions,
                                              &ctx->xout->wc,
                                              ctx->xbridge->masked_set_action);
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
                         "MPLS push action can't be performed as it would "
                         "have more MPLS LSEs than the %d supported.",
                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
        }
        ctx->exit = true;
        return;
    }

    flow_push_mpls(flow, n, mpls->ethertype, wc);
}
static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    int n = flow_count_mpls_labels(flow, wc);

    if (flow_pop_mpls(flow, n, eth_type, wc)) {
        if (ctx->xbridge->enable_recirc) {
            ctx->was_mpls = true;
        }
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
                         "MPLS pop action can't be performed as it has "
                         "more MPLS LSEs than the %d supported.",
                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
        }
        ctx->exit = true;
        ofpbuf_clear(ctx->xout->odp_actions);
    }
}
static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

    ctx->xout->wc.masks.nw_ttl = 0xff;
    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}
static void
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
        set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
    }
}
static void
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
        set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
    }
}
static void
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
    }
}
static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    struct flow_wildcards *wc = &ctx->xout->wc;

    if (eth_type_mpls(flow->dl_type)) {
        uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);

        wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        if (ttl > 1) {
            ttl--;
            set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
            return false;
        } else {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
        }
    }

    /* Stop processing for current table. */
    return true;
}
static void
xlate_output_action(struct xlate_ctx *ctx,
                    ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
    ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port, NULL);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                           0, may_packet_in, true);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len,
                                  (ctx->in_group ? OFPR_GROUP
                                   : ctx->in_action_set ? OFPR_ACTION_SET
                                   : OFPR_ACTION),
                                  0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            compose_output_action(ctx, port, NULL);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
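
/* The NetFlow bookkeeping above acts as a small state machine over
 * nf_output_iface.  A hypothetical sequence of outputs within one
 * translation: after the first output it holds that port; after a second,
 * different output it becomes NF_OUT_MULTI; once any flood occurs it
 * becomes NF_OUT_FLOOD and stays there. */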
static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, u16_to_ofp(port),
                            or->max_len, false);
    }
}
static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port, NULL);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}
static bool
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
{
    const struct xbridge *xbridge = xbridge_;
    struct xport *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(xbridge, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    ofp_port_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb,
                          CONST_CAST(struct xbridge *, ctx->xbridge));
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
                     &ctx->xout->wc);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}
static void
xlate_learn_action__(struct xlate_ctx *ctx, const struct ofpact_learn *learn,
                     struct ofputil_flow_mod *fm, struct ofpbuf *ofpacts)
{
    learn_execute(learn, &ctx->xin->flow, fm, ofpacts);
    if (ctx->xin->may_learn) {
        ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
    }
}
static void
xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
{
    ctx->xout->has_learn = true;
    learn_mask(learn, &ctx->xout->wc);

    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
        entry->u.learn.ofproto = ctx->xbridge->ofproto;
        entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
        entry->u.learn.ofpacts = ofpbuf_new(64);
        xlate_learn_action__(ctx, learn, entry->u.learn.fm,
                             entry->u.learn.ofpacts);
    } else if (ctx->xin->may_learn) {
        uint64_t ofpacts_stub[1024 / 8];
        struct ofputil_flow_mod fm;
        struct ofpbuf ofpacts;

        ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
        xlate_learn_action__(ctx, learn, &fm, &ofpacts);
        ofpbuf_uninit(&ofpacts);
    }
}
static void
xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
                    uint16_t idle_timeout, uint16_t hard_timeout)
{
    if (tcp_flags & (TCP_FIN | TCP_RST)) {
        rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
    }
}
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->rule) {
        xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
                            oft->fin_idle_timeout, oft->fin_hard_timeout);
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
            /* XC_RULE already holds a reference on the rule, none is taken
             * here. */
            entry->u.fin.rule = ctx->rule;
            entry->u.fin.idle = oft->fin_idle_timeout;
            entry->u.fin.hard = oft->fin_hard_timeout;
        }
    }
}
static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;

    if (!ctx->xbridge->variable_length_userdata) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
                    "lacks support (needs Linux 3.10+ or kernel module from "
                    "OVS 1.11+)");
        return;
    }

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          ctx->xbridge->masked_set_action);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->xbridge, ctx->xout->odp_actions,
                          &ctx->xin->flow, probability, &cookie,
                          sizeof cookie.flow_sample, ODPP_NONE);
}
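
/* Worked example of the probability scaling above: OpenFlow carries a
 * 16-bit probability while the datapath expects 32 bits out of UINT32_MAX.
 * Duplicating the 16-bit value into both halves,
 *
 *     p16 = 0x8000 (50%)  ->  (0x8000 << 16) | 0x8000 = 0x80008000
 *
 * and since 0x80008000 = 0x8000 * 0x10001 while UINT32_MAX =
 * UINT16_MAX * 0x10001, the ratio, and thus the sampled percentage, is
 * preserved exactly. */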
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
    if (xport->config & (is_stp(&ctx->xin->flow)
                         ? OFPUTIL_PC_NO_RECV_STP
                         : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
        (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
        return false;
    }

    return true;
}

static void
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
{
    const struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
    size_t on_len = ofpact_nest_get_action_len(on);
    const struct ofpact *inner;

    /* Maintain actset_output depending on the contents of the action set:
     *
     *   - OFPP_UNSET, if there is no "output" action.
     *
     *   - The output port, if there is an "output" action and no "group"
     *     action.
     *
     *   - OFPP_UNSET, if there is a "group" action.
     */
    if (!ctx->action_set_has_group) {
        OFPACT_FOR_EACH (inner, on->actions, on_len) {
            if (inner->type == OFPACT_OUTPUT) {
                ctx->xin->flow.actset_output = ofpact_get_OUTPUT(inner)->port;
            } else if (inner->type == OFPACT_GROUP) {
                ctx->xin->flow.actset_output = OFPP_UNSET;
                ctx->action_set_has_group = true;
            }
        }
    }

    ofpbuf_put(&ctx->action_set, on->actions, on_len);
    ofpact_pad(&ctx->action_set);
}
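
/* For example, write_actions(output:1) sets actset_output to port 1; a
 * later write_actions(group:2) resets it to OFPP_UNSET.  From then on
 * 'action_set_has_group' stays true, so further "output" writes no longer
 * update actset_output. */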

static void
xlate_action_set(struct xlate_ctx *ctx)
{
    uint64_t action_list_stub[1024 / 64];
    struct ofpbuf action_list;

    ctx->in_action_set = true;
    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
    ofpacts_execute_action_set(&action_list, &ctx->action_set);
    /* Clear the action set, as it is not needed any more. */
    ofpbuf_clear(&ctx->action_set);
    do_xlate_actions(action_list.data, action_list.size, ctx);
    ctx->in_action_set = false;
    ofpbuf_uninit(&action_list);
}
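
/* Note that ofpacts_execute_action_set() emits the accumulated actions in
 * the fixed order that OpenFlow 1.1+ prescribes for action sets (for
 * example, any "output" runs last), not in the order they were written. */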

static void
recirc_put_unroll_xlate(struct xlate_ctx *ctx)
{
    struct ofpact_unroll_xlate *unroll;

    unroll = ctx->last_unroll_offset < 0
        ? NULL
        : ALIGNED_CAST(struct ofpact_unroll_xlate *,
                       (char *)ctx->action_set.data + ctx->last_unroll_offset);

    /* Restore the table_id and rule cookie for a potential PACKET
     * IN if needed. */
    if (!unroll ||
        (ctx->table_id != unroll->rule_table_id
         || ctx->rule_cookie != unroll->rule_cookie)) {

        ctx->last_unroll_offset = ctx->action_set.size;
        unroll = ofpact_put_UNROLL_XLATE(&ctx->action_set);
        unroll->rule_table_id = ctx->table_id;
        unroll->rule_cookie = ctx->rule_cookie;
    }
}

/* Copy remaining actions to the action_set to be executed after recirculation.
 * UNROLL_XLATE action is inserted, if not already done so, before actions that
 * may generate PACKET_INs from the current table and without matching another
 * rule. */
static void
recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                      struct xlate_ctx *ctx)
{
    const struct ofpact *a;

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        switch (a->type) {
            /* May generate PACKET INs. */
        case OFPACT_OUTPUT_REG:
        case OFPACT_GROUP:
        case OFPACT_OUTPUT:
        case OFPACT_CONTROLLER:
        case OFPACT_DEC_MPLS_TTL:
        case OFPACT_DEC_TTL:
            recirc_put_unroll_xlate(ctx);
            break;

            /* These may not generate PACKET INs. */
        case OFPACT_SET_TUNNEL:
        case OFPACT_REG_MOVE:
        case OFPACT_SET_FIELD:
        case OFPACT_STACK_PUSH:
        case OFPACT_STACK_POP:
        case OFPACT_LEARN:
        case OFPACT_WRITE_METADATA:
        case OFPACT_RESUBMIT:        /* May indirectly generate PACKET INs, */
        case OFPACT_GOTO_TABLE:      /* but from a different table and rule. */
        case OFPACT_ENQUEUE:
        case OFPACT_SET_VLAN_VID:
        case OFPACT_SET_VLAN_PCP:
        case OFPACT_STRIP_VLAN:
        case OFPACT_PUSH_VLAN:
        case OFPACT_SET_ETH_SRC:
        case OFPACT_SET_ETH_DST:
        case OFPACT_SET_IPV4_SRC:
        case OFPACT_SET_IPV4_DST:
        case OFPACT_SET_IP_DSCP:
        case OFPACT_SET_IP_ECN:
        case OFPACT_SET_IP_TTL:
        case OFPACT_SET_L4_SRC_PORT:
        case OFPACT_SET_L4_DST_PORT:
        case OFPACT_SET_QUEUE:
        case OFPACT_POP_QUEUE:
        case OFPACT_PUSH_MPLS:
        case OFPACT_POP_MPLS:
        case OFPACT_SET_MPLS_LABEL:
        case OFPACT_SET_MPLS_TC:
        case OFPACT_SET_MPLS_TTL:
        case OFPACT_MULTIPATH:
        case OFPACT_BUNDLE:
        case OFPACT_EXIT:
        case OFPACT_UNROLL_XLATE:
        case OFPACT_FIN_TIMEOUT:
        case OFPACT_CLEAR_ACTIONS:
        case OFPACT_WRITE_ACTIONS:
        case OFPACT_METER:
        case OFPACT_SAMPLE:
            break;

            /* These need not be copied for restoration. */
        case OFPACT_NOTE:
        case OFPACT_CONJUNCTION:
            continue;
        }
        /* Copy the action over. */
        ofpbuf_put(&ctx->action_set, a, OFPACT_ALIGN(a->len));
    }
}
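
/* After this runs, ctx->action_set looks like (illustrative):
 *
 *   [ ...earlier action set... ][UNROLL_XLATE][remaining actions... ]
 *
 * so that, after recirculation, the UNROLL_XLATE entry restores the table
 * id and rule cookie before the copied actions execute. */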

#define CHECK_MPLS_RECIRCULATION()      \
    if (ctx->was_mpls) {                \
        ctx_trigger_recirculation(ctx); \
        break;                          \
    }
#define CHECK_MPLS_RECIRCULATION_IF(COND) \
    if (COND) {                           \
        CHECK_MPLS_RECIRCULATION();       \
    }
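
/* These macros are only usable inside the switch in do_xlate_actions()
 * below: the "break" they expand to leaves the current case.  For example,
 * CHECK_MPLS_RECIRCULATION() in a case body expands to
 *
 *   if (ctx->was_mpls) { ctx_trigger_recirculation(ctx); break; }
 *
 * which skips the rest of the action whenever the packet's MPLS stack was
 * already modified in this translation. */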

static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    const struct ofpact *a;

    if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
        tnl_arp_snoop(flow, wc, ctx->xbridge->name);
    }
    /* dl_type already in the mask, not set below. */

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;
        const struct ofpact_set_field *set_field;
        const struct mf_field *mf;

        if (ctx->exit) {
            /* Check if need to store the remaining actions for later
             * execution. */
            if (exit_recirculates(ctx)) {
                recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
                                                      ((uint8_t *)a -
                                                       (uint8_t *)ofpacts)),
                                      ctx);
            }
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_GROUP:
            if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
                /* Group could not be found. */
                return;
            }
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            memset(&wc->masks.skb_priority, 0xff,
                   sizeof wc->masks.skb_priority);
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
            if (flow->vlan_tci & htons(VLAN_CFI) ||
                ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
                flow->vlan_tci &= ~htons(VLAN_VID_MASK);
                flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            }
            break;

        case OFPACT_SET_VLAN_PCP:
            wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
            if (flow->vlan_tci & htons(VLAN_CFI) ||
                ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
                flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
                flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
            }
            break;
        case OFPACT_STRIP_VLAN:
            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
            flow->vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
            flow->vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
            memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
            memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            CHECK_MPLS_RECIRCULATION();
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
                flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            CHECK_MPLS_RECIRCULATION();
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
                flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IP_DSCP:
            CHECK_MPLS_RECIRCULATION();
            if (is_ip_any(flow)) {
                wc->masks.nw_tos |= IP_DSCP_MASK;
                flow->nw_tos &= ~IP_DSCP_MASK;
                flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_IP_ECN:
            CHECK_MPLS_RECIRCULATION();
            if (is_ip_any(flow)) {
                wc->masks.nw_tos |= IP_ECN_MASK;
                flow->nw_tos &= ~IP_ECN_MASK;
                flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
            }
            break;

        case OFPACT_SET_IP_TTL:
            CHECK_MPLS_RECIRCULATION();
            if (is_ip_any(flow)) {
                wc->masks.nw_ttl = 0xff;
                flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            CHECK_MPLS_RECIRCULATION();
            if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
                memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
                flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            CHECK_MPLS_RECIRCULATION();
            if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
                memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
                flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            memset(&wc->masks.skb_priority, 0xff,
                   sizeof wc->masks.skb_priority);
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            memset(&wc->masks.skb_priority, 0xff,
                   sizeof wc->masks.skb_priority);
            flow->skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            CHECK_MPLS_RECIRCULATION_IF(
                mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->dst.field) ||
                mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->src.field));
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
            break;

        case OFPACT_SET_FIELD:
            CHECK_MPLS_RECIRCULATION_IF(
                mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field));
            set_field = ofpact_get_SET_FIELD(a);
            mf = set_field->field;

            /* Set field action only ever overwrites packet's outermost
             * applicable header fields.  Do nothing if no header exists. */
            if (mf->id == MFF_VLAN_VID) {
                wc->masks.vlan_tci |= htons(VLAN_CFI);
                if (!(flow->vlan_tci & htons(VLAN_CFI))) {
                    break;
                }
            } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
                       /* 'dl_type' is already unwildcarded. */
                       && !eth_type_mpls(flow->dl_type)) {
                break;
            }
            /* A flow may wildcard nw_frag.  Do nothing if setting a transport
             * header field on a packet that does not have them. */
            mf_mask_field_and_prereqs(mf, &wc->masks);
            if (mf_are_prereqs_ok(mf, flow)) {
                mf_set_flow_value_masked(mf, &set_field->value,
                                         &set_field->mask, flow);
            }
            break;
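
        /* For example, a set_field on tcp_dst has nw_proto == IPPROTO_TCP
         * among its prerequisites, so mf_are_prereqs_ok() quietly turns it
         * into a no-op on a packet that is not TCP. */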
        case OFPACT_STACK_PUSH:
            CHECK_MPLS_RECIRCULATION_IF(
                mf_is_l3_or_higher(ofpact_get_STACK_PUSH(a)->subfield.field));
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            CHECK_MPLS_RECIRCULATION_IF(
                mf_is_l3_or_higher(ofpact_get_STACK_POP(a)->subfield.field));
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
                                  &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            /* Recirculate if it is an IP packet with a zero ttl.  This may
             * indicate that the packet was previously MPLS and an MPLS pop
             * action converted it to IP.  In this case recirculating should
             * reveal the IP TTL which is used as the basis for a new MPLS
             * LSE. */
            CHECK_MPLS_RECIRCULATION_IF(
                !flow_count_mpls_labels(flow, wc)
                && flow->nw_ttl == 0
                && is_ip_any(flow));
            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
            break;

        case OFPACT_POP_MPLS:
            CHECK_MPLS_RECIRCULATION();
            compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_LABEL:
            CHECK_MPLS_RECIRCULATION();
            compose_set_mpls_label_action(
                ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
            break;

        case OFPACT_SET_MPLS_TC:
            CHECK_MPLS_RECIRCULATION();
            compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
            break;

        case OFPACT_SET_MPLS_TTL:
            CHECK_MPLS_RECIRCULATION();
            compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
            break;

        case OFPACT_DEC_MPLS_TTL:
            CHECK_MPLS_RECIRCULATION();
            if (compose_dec_mpls_ttl_action(ctx)) {
                return;
            }
            break;

        case OFPACT_DEC_TTL:
            CHECK_MPLS_RECIRCULATION();
            wc->masks.nw_ttl = 0xff;
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                return;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            CHECK_MPLS_RECIRCULATION();
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            CHECK_MPLS_RECIRCULATION();
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            CHECK_MPLS_RECIRCULATION();
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_CONJUNCTION: {
            /* A flow with a "conjunction" action represents part of a special
             * kind of "set membership match".  Such a flow should not actually
             * get executed, but it could via, say, a "packet-out", even though
             * that wouldn't be useful.  Log it to help debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_INFO_RL(&rl, "executing no-op conjunction action");
            break;
        }

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_UNROLL_XLATE: {
            struct ofpact_unroll_xlate *unroll = ofpact_get_UNROLL_XLATE(a);

            /* Restore translation context data that was stored earlier. */
            ctx->table_id = unroll->rule_table_id;
            ctx->rule_cookie = unroll->rule_cookie;
            break;
        }

        case OFPACT_FIN_TIMEOUT:
            CHECK_MPLS_RECIRCULATION();
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            ofpbuf_clear(&ctx->action_set);
            ctx->xin->flow.actset_output = OFPP_UNSET;
            ctx->action_set_has_group = false;
            break;

        case OFPACT_WRITE_ACTIONS:
            xlate_write_actions(ctx, a);
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_METER:
            /* Not implemented yet. */
            break;

        case OFPACT_GOTO_TABLE: {
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);

            /* Allow ctx->table_id == TBL_INTERNAL, which will be greater
             * than ogt->table_id.  This is to allow goto_table actions that
             * triggered recirculation: ctx->table_id will be TBL_INTERNAL
             * after recirculation. */
            ovs_assert(ctx->table_id == TBL_INTERNAL
                       || ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                               ogt->table_id, true, true);
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }

        /* Check if need to store this and the remaining actions for later
         * execution. */
        if (ctx->exit && ctx_first_recirculation_action(ctx)) {
            recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
                                                  ((uint8_t *)a -
                                                   (uint8_t *)ofpacts)),
                                  ctx);
            break;
        }
    }
}

void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, ofp_port_t in_port,
              struct rule_dpif *rule, uint16_t tcp_flags,
              const struct dp_packet *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->flow.in_port.ofp_port = in_port;
    xin->flow.actset_output = OFPP_UNSET;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->xcache = NULL;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
    xin->skip_wildcards = false;
    xin->odp_actions = NULL;

    /* Do recirc lookup. */
    xin->recirc = flow->recirc_id
        ? recirc_id_node_find(flow->recirc_id)
        : NULL;
}

void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout) {
        if (xout->odp_actions == &xout->odp_actions_buf) {
            ofpbuf_uninit(xout->odp_actions);
        }
        xlate_out_free_recircs(xout);
    }
}
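
/* Typical calling sequence (illustrative):
 *
 *   struct xlate_in xin;
 *   struct xlate_out xout;
 *
 *   xlate_in_init(&xin, ofproto, &flow, in_port, NULL, tcp_flags, packet);
 *   xlate_actions(&xin, &xout);
 *   ...use xout.odp_actions...
 *   xlate_out_uninit(&xout);
 */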

/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'xin', and discards the datapath actions. */
void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}

void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->wc = src->wc;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    dst->odp_actions = &dst->odp_actions_buf;
    ofpbuf_use_stub(dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(dst->odp_actions, src->odp_actions->data,
               src->odp_actions->size);
}
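
/* The copy deliberately re-homes the actions in 'dst': pointing
 * dst->odp_actions at dst's own stub before the ofpbuf_put() means the
 * buffer only moves to the heap if the copied actions outgrow the stub. */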

static struct skb_priority_to_dscp *
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
{
    struct skb_priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(skb_priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
        if (pdscp->skb_priority == skb_priority) {
            return pdscp;
        }
    }

    return NULL;
}

static bool
dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
                       uint8_t *dscp)
{
    struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
    *dscp = pdscp ? pdscp->dscp : 0;
    return pdscp != NULL;
}

static size_t
count_skb_priorities(const struct xport *xport)
{
    return hmap_count(&xport->skb_priorities);
}

static void
clear_skb_priorities(struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
        hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}

static bool
actions_output_to_local_port(const struct xlate_ctx *ctx)
{
    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions->data,
                             ctx->xout->odp_actions->size) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
            && nl_attr_get_odp_port(a) == local_odp_port) {
            return true;
        }
    }
    return false;
}

#if defined(__linux__)
/* Returns the maximum number of packets that the Linux kernel is willing to
 * queue up internally to certain kinds of software-implemented ports, or the
 * default (and rarely modified) value if it cannot be determined. */
static int
netdev_max_backlog(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static int max_backlog = 1000; /* The normal default value. */

    if (ovsthread_once_start(&once)) {
        static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
        FILE *stream;
        int n;

        stream = fopen(filename, "r");
        if (!stream) {
            VLOG_WARN("%s: open failed (%s)", filename, ovs_strerror(errno));
        } else {
            if (fscanf(stream, "%d", &n) != 1) {
                VLOG_WARN("%s: read error", filename);
            } else if (n <= 100) {
                VLOG_WARN("%s: unexpectedly small value %d", filename, n);
            } else {
                max_backlog = n;
            }
            fclose(stream);
        }
        ovsthread_once_done(&once);

        VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
    }

    return max_backlog;
}
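
/* The value read here is the same one an administrator inspects or changes
 * with, e.g., "sysctl net.core.netdev_max_backlog"; it is read once per
 * process and cached in 'max_backlog'. */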

/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
 * 'odp_actions'. */
static int
count_output_actions(const struct ofpbuf *odp_actions)
{
    const struct nlattr *a;
    size_t left;
    int n = 0;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, odp_actions->data, odp_actions->size) {
        if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
            n++;
        }
    }

    return n;
}
#endif /* defined(__linux__) */

/* Returns true if 'odp_actions' contains more output actions than the datapath
 * can reliably handle in one go.  On Linux, this is the value of the
 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
 * packets that the kernel is willing to queue up for processing while the
 * datapath is processing a set of actions. */
static bool
too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
{
#ifdef __linux__
    return (odp_actions->size / NL_A_U32_SIZE > netdev_max_backlog()
            && count_output_actions(odp_actions) > netdev_max_backlog());
#else
    /* OSes other than Linux might have similar limits, but we don't know how
     * to determine them. */
    return false;
#endif
}
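
/* The first comparison above is a cheap screen: each OVS_ACTION_ATTR_OUTPUT
 * action occupies exactly NL_A_U32_SIZE bytes, so size / NL_A_U32_SIZE is an
 * upper bound on the number of output actions.  Only when that bound exceeds
 * the backlog do we pay for the exact count_output_actions() walk. */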

/* Translates the flow, actions, or rule in 'xin' into datapath actions in
 * 'xout'.
 * The caller must take responsibility for eventually freeing 'xout', with
 * xlate_out_uninit(). */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct flow_wildcards *wc = NULL;
    struct flow *flow = &xin->flow;
    struct rule_dpif *rule = NULL;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct xbridge *xbridge;
    struct xport *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;
    bool tnl_may_send;
    bool is_icmp;

    COVERAGE_INC(xlate_actions);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'. This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;
    ctx.xout->n_recircs = 0;

    xout->odp_actions = xin->odp_actions;
    if (!xout->odp_actions) {
        xout->odp_actions = &xout->odp_actions_buf;
        ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
                        sizeof xout->odp_actions_stub);
    }
    ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);

    xbridge = xbridge_lookup(xcfg, xin->ofproto);
    if (!xbridge) {
        return;
    }
    /* 'ctx.xbridge' may be changed by action processing, whereas 'xbridge'
     * will remain set on the original input bridge. */
    ctx.xbridge = xbridge;
    ctx.rule = xin->rule;

    ctx.base_flow = *flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;

    if (!xin->skip_wildcards) {
        wc = &xout->wc;
        flow_wildcards_init_catchall(wc);
        memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
        memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
        if (is_ip_any(flow)) {
            wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
        }
        if (xbridge->enable_recirc) {
            /* Always exactly match recirc_id when datapath supports
             * recirculation. */
            wc->masks.recirc_id = UINT32_MAX;
        }
        if (xbridge->netflow) {
            netflow_mask_wc(flow, wc);
        }
    }
    is_icmp = is_icmpv4(flow) || is_icmpv6(flow);

    tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);

    ctx.recurse = 0;
    ctx.resubmits = 0;
    ctx.in_group = false;
    ctx.in_action_set = false;
    ctx.orig_skb_priority = flow->skb_priority;
    ctx.table_id = 0;
    ctx.rule_cookie = OVS_BE64_MAX;
    ctx.exit = false;
    ctx.was_mpls = false;
    ctx.recirc_action_offset = -1;
    ctx.last_unroll_offset = -1;

    ctx.action_set_has_group = false;
    ofpbuf_use_stub(&ctx.action_set,
                    ctx.action_set_stub, sizeof ctx.action_set_stub);

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    /* The in_port of the original packet before recirculation. */
    in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);

    if (xin->recirc) {
        const struct recirc_id_node *recirc = xin->recirc;

        if (xin->ofpacts_len > 0 || ctx.rule) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

            VLOG_WARN_RL(&rl, "Recirculation conflict (%s)!",
                         xin->ofpacts_len > 0
                         ? "actions"
                         : "rule");
            return;
        }

        /* Set the bridge for post-recirculation processing if needed. */
        if (ctx.xbridge->ofproto != recirc->ofproto) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            const struct xbridge *new_bridge = xbridge_lookup(xcfg,
                                                              recirc->ofproto);

            if (OVS_UNLIKELY(!new_bridge)) {
                /* Drop the packet if the bridge cannot be found. */
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
                VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
                return;
            }
            ctx.xbridge = new_bridge;
        }

        /* Set the post-recirculation table id.  Note: A table lookup is done
         * only if there are no post-recirculation actions. */
        ctx.table_id = recirc->table_id;

        /* Restore pipeline metadata. May change flow's in_port and other
         * metadata to the values that existed when recirculation was
         * triggered. */
        recirc_metadata_to_flow(&recirc->metadata, flow);

        /* Restore stack, if any. */
        if (recirc->stack) {
            ofpbuf_put(&ctx.stack, recirc->stack->data, recirc->stack->size);
        }

        /* Restore action set, if any. */
        if (recirc->action_set_len) {
            const struct ofpact *a;

            ofpbuf_put(&ctx.action_set, recirc->ofpacts,
                       recirc->action_set_len);

            OFPACT_FOR_EACH(a, recirc->ofpacts, recirc->action_set_len) {
                if (a->type == OFPACT_GROUP) {
                    ctx.action_set_has_group = true;
                    break;
                }
            }
        }

        /* Restore recirculation actions.  If there are no actions, processing
         * will start with a lookup in the table set above. */
        if (recirc->ofpacts_len > recirc->action_set_len) {
            xin->ofpacts_len = recirc->ofpacts_len - recirc->action_set_len;
            xin->ofpacts = recirc->ofpacts +
                recirc->action_set_len / sizeof *recirc->ofpacts;
        }
    } else if (OVS_UNLIKELY(flow->recirc_id)) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
                     flow->recirc_id);
        return;
    }

    if (!xin->ofpacts && !ctx.rule) {
        rule = rule_dpif_lookup_from_table(ctx.xbridge->ofproto, flow, wc,
                                           ctx.xin->xcache != NULL,
                                           ctx.xin->resubmit_stats,
                                           &ctx.table_id,
                                           flow->in_port.ofp_port, true, true);
        if (ctx.xin->resubmit_stats) {
            rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
            entry->u.rule = rule;
        }
        ctx.rule = rule;

        if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
            ctx.xin->resubmit_hook(ctx.xin, rule, 0);
        }
    }
    xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (ctx.rule) {
        const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);

        ofpacts = actions->ofpacts;
        ofpacts_len = actions->ofpacts_len;

        ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
    } else {
        OVS_NOT_REACHED();
    }

    if (mbridge_has_mirrors(xbridge->mbridge)) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = *flow;
    }

    /* Tunnel stats only for non-recirculated packets. */
    if (!xin->recirc && in_port && in_port->is_tunnel) {
        if (ctx.xin->resubmit_stats) {
            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
            if (in_port->bfd) {
                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
            }
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
            entry->u.dev.rx = netdev_ref(in_port->netdev);
            entry->u.dev.bfd = bfd_ref(in_port->bfd);
        }
    }

    /* Do not perform special processing on recirculated packets,
     * as recirculated packets are not really received by the bridge. */
    if (!xin->recirc &&
        (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
        ctx.xout->slow |= special;
    } else {
        size_t sample_actions_len;

        if (flow->in_port.ofp_port
            != vsp_realdev_to_vlandev(xbridge->ofproto,
                                      flow->in_port.ofp_port,
                                      flow->vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        /* Sampling is done only for packets really received by the bridge. */
        if (!xin->recirc) {
            add_sflow_action(&ctx);
            add_ipfix_action(&ctx);
            sample_actions_len = ctx.xout->odp_actions->size;
        } else {
            sample_actions_len = 0;
        }

        if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && (!xport_stp_forward_state(in_port) ||
                            !xport_rstp_forward_state(in_port))) {
                /* Drop all actions added by do_xlate_actions() above. */
                ctx.xout->odp_actions->size = sample_actions_len;

                /* Undo changes that may have been done for recirculation. */
                if (exit_recirculates(&ctx)) {
                    ctx.action_set.size = ctx.recirc_action_offset;
                    ctx.recirc_action_offset = -1;
                    ctx.last_unroll_offset = -1;
                }
            } else if (ctx.action_set.size) {
                /* Translate action set only if not dropping the packet and
                 * not recirculating. */
                if (!exit_recirculates(&ctx)) {
                    xlate_action_set(&ctx);
                }
            }
            /* Check if need to recirculate. */
            if (exit_recirculates(&ctx)) {
                compose_recirculate_action(&ctx);
            }
        }

        /* Output only fully processed packets. */
        if (!exit_recirculates(&ctx)
            && xbridge->has_in_band
            && in_band_must_output_to_local_port(flow)
            && !actions_output_to_local_port(&ctx)) {
            compose_output_action(&ctx, OFPP_LOCAL, NULL);
        }

        if (!xin->recirc) {
            fix_sflow_action(&ctx);
        }
        /* Only mirror fully processed packets. */
        if (!exit_recirculates(&ctx)
            && mbridge_has_mirrors(xbridge->mbridge)) {
            add_mirror_actions(&ctx, &orig_flow);
        }
    }

    if (nl_attr_oversized(ctx.xout->odp_actions->size)) {
        /* These datapath actions are too big for a Netlink attribute, so we
         * can't hand them to the kernel directly.  dpif_execute() can execute
         * them one by one with help, so just mark the result as SLOW_ACTION to
         * prevent the flow from being installed. */
        COVERAGE_INC(xlate_actions_oversize);
        ctx.xout->slow |= SLOW_ACTION;
    } else if (too_many_output_actions(ctx.xout->odp_actions)) {
        COVERAGE_INC(xlate_actions_too_many_output);
        ctx.xout->slow |= SLOW_ACTION;
    }

    /* Update mirror stats only for packets really received by the bridge. */
    if (!xin->recirc && mbridge_has_mirrors(xbridge->mbridge)) {
        if (ctx.xin->resubmit_stats) {
            mirror_update_stats(xbridge->mbridge, xout->mirrors,
                                ctx.xin->resubmit_stats->n_packets,
                                ctx.xin->resubmit_stats->n_bytes);
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
            entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
            entry->u.mirror.mirrors = xout->mirrors;
        }
    }

    /* Do netflow only for packets really received by the bridge. */
    if (!xin->recirc && xbridge->netflow) {
        /* Only update netflow if we don't have controller flow.  We don't
         * report NetFlow expiration messages for such facets because they
         * are just part of the control logic for the network, not real
         * traffic. */
        if (ofpacts_len == 0
            || ofpacts->type != OFPACT_CONTROLLER
            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
            if (ctx.xin->resubmit_stats) {
                netflow_flow_update(xbridge->netflow, flow,
                                    xout->nf_output_iface,
                                    ctx.xin->resubmit_stats);
            }
            if (ctx.xin->xcache) {
                struct xc_entry *entry;

                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
                entry->u.nf.netflow = netflow_ref(xbridge->netflow);
                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
                entry->u.nf.iface = xout->nf_output_iface;
            }
        }
    }

    ofpbuf_uninit(&ctx.stack);
    ofpbuf_uninit(&ctx.action_set);

    if (wc) {
        /* Clear the metadata and register wildcard masks, because we won't
         * use non-header fields as part of the cache. */
        flow_wildcards_clear_non_packet_fields(wc);

        /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow
         * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
         * represent these fields.  The datapath interface, on the other hand,
         * represents them with just 8 bits each.  This means that if the high
         * 8 bits of the masks for these fields somehow become set, then they
         * will get chopped off by a round trip through the datapath, and
         * revalidation will spot that as an inconsistency and delete the flow.
         * Avoid the problem here by making sure that only the low 8 bits of
         * either field can be unwildcarded for ICMP.
         */
        if (is_icmp) {
            wc->masks.tp_src &= htons(UINT8_MAX);
            wc->masks.tp_dst &= htons(UINT8_MAX);
        }
    }
}
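
/* For example, for an ICMP flow the tp_src mask (which carries the ICMP
 * type) is clamped with htons(UINT8_MAX), so only the 8 bits that actually
 * carry the type or code can ever be unwildcarded and the mask survives the
 * round trip through the datapath intact. */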

/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
int
xlate_send_packet(const struct ofport_dpif *ofport, struct dp_packet *packet)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct xport *xport;
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
    flow_extract(packet, &flow);
    flow.in_port.ofp_port = OFPP_NONE;

    xport = xport_lookup(xcfg, ofport);
    if (!xport) {
        return EINVAL;
    }
    output.port = xport->ofp_port;
    output.max_len = 0;

    return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
                                        &output.ofpact, sizeof output,
                                        packet);
}

struct xlate_cache *
xlate_cache_new(void)
{
    struct xlate_cache *xcache = xmalloc(sizeof *xcache);

    ofpbuf_init(&xcache->entries, 512);
    return xcache;
}

static struct xc_entry *
xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
{
    struct xc_entry *entry;

    entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
    entry->type = type;

    return entry;
}

static void
xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
{
    if (entry->u.dev.tx) {
        netdev_vport_inc_tx(entry->u.dev.tx, stats);
    }
    if (entry->u.dev.rx) {
        netdev_vport_inc_rx(entry->u.dev.rx, stats);
    }
    if (entry->u.dev.bfd) {
        bfd_account_rx(entry->u.dev.bfd, stats);
    }
}

static void
xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct xbridge *xbridge;
    struct xbundle *xbundle;
    struct flow_wildcards wc;

    xbridge = xbridge_lookup(xcfg, ofproto);
    if (!xbridge) {
        return;
    }

    xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
                                  NULL);
    if (!xbundle) {
        return;
    }

    update_learning_table(xbridge, flow, &wc, vlan, xbundle);
}

/* Push stats and perform side effects of flow translation. */
void
xlate_push_stats(struct xlate_cache *xcache,
                 const struct dpif_flow_stats *stats)
{
    struct xc_entry *entry;
    struct ofpbuf entries = xcache->entries;
    uint8_t dmac[ETH_ADDR_LEN];

    if (!stats->n_packets) {
        return;
    }

    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_credit_stats(entry->u.rule, stats);
            break;
        case XC_BOND:
            bond_account(entry->u.bond.bond, entry->u.bond.flow,
                         entry->u.bond.vid, stats->n_bytes);
            break;
        case XC_NETDEV:
            xlate_cache_netdev(entry, stats);
            break;
        case XC_NETFLOW:
            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
                                entry->u.nf.iface, stats);
            break;
        case XC_MIRROR:
            mirror_update_stats(entry->u.mirror.mbridge,
                                entry->u.mirror.mirrors,
                                stats->n_packets, stats->n_bytes);
            break;
        case XC_LEARN:
            ofproto_dpif_flow_mod(entry->u.learn.ofproto, entry->u.learn.fm);
            break;
        case XC_NORMAL:
            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
                               entry->u.normal.vlan);
            break;
        case XC_FIN_TIMEOUT:
            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
                                entry->u.fin.idle, entry->u.fin.hard);
            break;
        case XC_GROUP:
            group_dpif_credit_stats(entry->u.group.group,
                                    entry->u.group.bucket, stats);
            break;
        case XC_TNL_ARP:
            /* Lookup arp to avoid arp timeout. */
            tnl_arp_lookup(entry->u.tnl_arp_cache.br_name,
                           entry->u.tnl_arp_cache.d_ip, dmac);
            break;
        default:
            OVS_NOT_REACHED();
        }
    }
}

static void
xlate_dev_unref(struct xc_entry *entry)
{
    if (entry->u.dev.tx) {
        netdev_close(entry->u.dev.tx);
    }
    if (entry->u.dev.rx) {
        netdev_close(entry->u.dev.rx);
    }
    if (entry->u.dev.bfd) {
        bfd_unref(entry->u.dev.bfd);
    }
}

static void
xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
{
    netflow_flow_clear(netflow, flow);
    netflow_unref(netflow);
    free(flow);
}

void
xlate_cache_clear(struct xlate_cache *xcache)
{
    struct xc_entry *entry;
    struct ofpbuf entries;

    if (!xcache) {
        return;
    }

    entries = xcache->entries;
    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_unref(entry->u.rule);
            break;
        case XC_BOND:
            free(entry->u.bond.flow);
            bond_unref(entry->u.bond.bond);
            break;
        case XC_NETDEV:
            xlate_dev_unref(entry);
            break;
        case XC_NETFLOW:
            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
            break;
        case XC_MIRROR:
            mbridge_unref(entry->u.mirror.mbridge);
            break;
        case XC_LEARN:
            free(entry->u.learn.fm);
            ofpbuf_delete(entry->u.learn.ofpacts);
            break;
        case XC_NORMAL:
            free(entry->u.normal.flow);
            break;
        case XC_FIN_TIMEOUT:
            /* 'u.fin.rule' is always already held as a XC_RULE, which
             * has already released its reference above. */
            break;
        case XC_GROUP:
            group_dpif_unref(entry->u.group.group);
            break;
        case XC_TNL_ARP:
            break;
        default:
            OVS_NOT_REACHED();
        }
    }

    ofpbuf_clear(&xcache->entries);
}

void
xlate_cache_delete(struct xlate_cache *xcache)
{
    xlate_cache_clear(xcache);
    ofpbuf_uninit(&xcache->entries);
    free(xcache);
}