2 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "ofproto/ofproto-provider.h"
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "fail-open.h"
35 #include "mac-learning.h"
36 #include "meta-flow.h"
37 #include "multipath.h"
38 #include "netdev-vport.h"
45 #include "ofp-actions.h"
46 #include "ofp-parse.h"
47 #include "ofp-print.h"
48 #include "ofproto-dpif-governor.h"
49 #include "ofproto-dpif-sflow.h"
50 #include "poll-loop.h"
55 #include "unaligned.h"
57 #include "vlan-bitmap.h"
60 VLOG_DEFINE_THIS_MODULE(ofproto_dpif
);
62 COVERAGE_DEFINE(ofproto_dpif_expired
);
63 COVERAGE_DEFINE(ofproto_dpif_xlate
);
64 COVERAGE_DEFINE(facet_changed_rule
);
65 COVERAGE_DEFINE(facet_revalidate
);
66 COVERAGE_DEFINE(facet_unexpected
);
67 COVERAGE_DEFINE(facet_suppress
);
69 /* Maximum depth of flow table recursion (due to resubmit actions) in a
70 * flow translation. */
71 #define MAX_RESUBMIT_RECURSION 64
73 /* Number of implemented OpenFlow tables. */
74 enum { N_TABLES
= 255 };
75 enum { TBL_INTERNAL
= N_TABLES
- 1 }; /* Used for internal hidden rules. */
76 BUILD_ASSERT_DECL(N_TABLES
>= 2 && N_TABLES
<= 255);
87 * - Do include packets and bytes from facets that have been deleted or
88 * whose own statistics have been folded into the rule.
90 * - Do include packets and bytes sent "by hand" that were accounted to
91 * the rule without any facet being involved (this is a rare corner
92 * case in rule_execute()).
94 * - Do not include packet or bytes that can be obtained from any facet's
95 * packet_count or byte_count member or that can be obtained from the
96 * datapath by, e.g., dpif_flow_get() for any subfacet.
98 uint64_t packet_count
; /* Number of packets received. */
99 uint64_t byte_count
; /* Number of bytes received. */
101 tag_type tag
; /* Caches rule_calculate_tag() result. */
103 struct list facets
; /* List of "struct facet"s. */
106 static struct rule_dpif
*rule_dpif_cast(const struct rule
*rule
)
108 return rule
? CONTAINER_OF(rule
, struct rule_dpif
, up
) : NULL
;
111 static struct rule_dpif
*rule_dpif_lookup(struct ofproto_dpif
*,
112 const struct flow
*);
113 static struct rule_dpif
*rule_dpif_lookup__(struct ofproto_dpif
*,
116 static struct rule_dpif
*rule_dpif_miss_rule(struct ofproto_dpif
*ofproto
,
117 const struct flow
*flow
);
119 static void rule_credit_stats(struct rule_dpif
*,
120 const struct dpif_flow_stats
*);
121 static void flow_push_stats(struct rule_dpif
*, const struct flow
*,
122 const struct dpif_flow_stats
*);
123 static tag_type
rule_calculate_tag(const struct flow
*,
124 const struct minimask
*, uint32_t basis
);
125 static void rule_invalidate(const struct rule_dpif
*);
127 #define MAX_MIRRORS 32
128 typedef uint32_t mirror_mask_t
;
129 #define MIRROR_MASK_C(X) UINT32_C(X)
130 BUILD_ASSERT_DECL(sizeof(mirror_mask_t
) * CHAR_BIT
>= MAX_MIRRORS
);
132 struct ofproto_dpif
*ofproto
; /* Owning ofproto. */
133 size_t idx
; /* In ofproto's "mirrors" array. */
134 void *aux
; /* Key supplied by ofproto's client. */
135 char *name
; /* Identifier for log messages. */
137 /* Selection criteria. */
138 struct hmapx srcs
; /* Contains "struct ofbundle *"s. */
139 struct hmapx dsts
; /* Contains "struct ofbundle *"s. */
140 unsigned long *vlans
; /* Bitmap of chosen VLANs, NULL selects all. */
142 /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
143 struct ofbundle
*out
; /* Output port or NULL. */
144 int out_vlan
; /* Output VLAN or -1. */
145 mirror_mask_t dup_mirrors
; /* Bitmap of mirrors with the same output. */
148 int64_t packet_count
; /* Number of packets sent. */
149 int64_t byte_count
; /* Number of bytes sent. */
152 static void mirror_destroy(struct ofmirror
*);
153 static void update_mirror_stats(struct ofproto_dpif
*ofproto
,
154 mirror_mask_t mirrors
,
155 uint64_t packets
, uint64_t bytes
);
158 struct hmap_node hmap_node
; /* In struct ofproto's "bundles" hmap. */
159 struct ofproto_dpif
*ofproto
; /* Owning ofproto. */
160 void *aux
; /* Key supplied by ofproto's client. */
161 char *name
; /* Identifier for log messages. */
164 struct list ports
; /* Contains "struct ofport"s. */
165 enum port_vlan_mode vlan_mode
; /* VLAN mode */
166 int vlan
; /* -1=trunk port, else a 12-bit VLAN ID. */
167 unsigned long *trunks
; /* Bitmap of trunked VLANs, if 'vlan' == -1.
168 * NULL if all VLANs are trunked. */
169 struct lacp
*lacp
; /* LACP if LACP is enabled, otherwise NULL. */
170 struct bond
*bond
; /* Nonnull iff more than one port. */
171 bool use_priority_tags
; /* Use 802.1p tag for frames in VLAN 0? */
174 bool floodable
; /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
176 /* Port mirroring info. */
177 mirror_mask_t src_mirrors
; /* Mirrors triggered when packet received. */
178 mirror_mask_t dst_mirrors
; /* Mirrors triggered when packet sent. */
179 mirror_mask_t mirror_out
; /* Mirrors that output to this bundle. */
182 static void bundle_remove(struct ofport
*);
183 static void bundle_update(struct ofbundle
*);
184 static void bundle_destroy(struct ofbundle
*);
185 static void bundle_del_port(struct ofport_dpif
*);
186 static void bundle_run(struct ofbundle
*);
187 static void bundle_wait(struct ofbundle
*);
188 static struct ofbundle
*lookup_input_bundle(const struct ofproto_dpif
*,
189 uint16_t in_port
, bool warn
,
190 struct ofport_dpif
**in_ofportp
);
192 /* A controller may use OFPP_NONE as the ingress port to indicate that
193 * it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
194 * when an input bundle is needed for validation (e.g., mirroring or
195 * OFPP_NORMAL processing). It is not connected to an 'ofproto' or have
196 * any 'port' structs, so care must be taken when dealing with it. */
197 static struct ofbundle ofpp_none_bundle
= {
199 .vlan_mode
= PORT_VLAN_TRUNK
202 static void stp_run(struct ofproto_dpif
*ofproto
);
203 static void stp_wait(struct ofproto_dpif
*ofproto
);
204 static int set_stp_port(struct ofport
*,
205 const struct ofproto_port_stp_settings
*);
207 static bool ofbundle_includes_vlan(const struct ofbundle
*, uint16_t vlan
);
209 struct action_xlate_ctx
{
210 /* action_xlate_ctx_init() initializes these members. */
213 struct ofproto_dpif
*ofproto
;
215 /* Flow to which the OpenFlow actions apply. xlate_actions() will modify
216 * this flow when actions change header fields. */
219 /* stack for the push and pop actions.
220 * Each stack element is of the type "union mf_subvalue". */
222 union mf_subvalue init_stack
[1024 / sizeof(union mf_subvalue
)];
224 /* The packet corresponding to 'flow', or a null pointer if we are
225 * revalidating without a packet to refer to. */
226 const struct ofpbuf
*packet
;
228 /* Should OFPP_NORMAL update the MAC learning table? Should "learn"
229 * actions update the flow table?
231 * We want to update these tables if we are actually processing a packet,
232 * or if we are accounting for packets that the datapath has processed, but
233 * not if we are just revalidating. */
236 /* The rule that we are currently translating, or NULL. */
237 struct rule_dpif
*rule
;
239 /* Union of the set of TCP flags seen so far in this flow. (Used only by
240 * NXAST_FIN_TIMEOUT. Set to zero to avoid updating updating rules'
244 /* If nonnull, flow translation calls this function just before executing a
245 * resubmit or OFPP_TABLE action. In addition, disables logging of traces
246 * when the recursion depth is exceeded.
248 * 'rule' is the rule being submitted into. It will be null if the
249 * resubmit or OFPP_TABLE action didn't find a matching rule.
251 * This is normally null so the client has to set it manually after
252 * calling action_xlate_ctx_init(). */
253 void (*resubmit_hook
)(struct action_xlate_ctx
*, struct rule_dpif
*rule
);
255 /* If nonnull, flow translation calls this function to report some
256 * significant decision, e.g. to explain why OFPP_NORMAL translation
257 * dropped a packet. */
258 void (*report_hook
)(struct action_xlate_ctx
*, const char *s
);
260 /* If nonnull, flow translation credits the specified statistics to each
261 * rule reached through a resubmit or OFPP_TABLE action.
263 * This is normally null so the client has to set it manually after
264 * calling action_xlate_ctx_init(). */
265 const struct dpif_flow_stats
*resubmit_stats
;
267 /* xlate_actions() initializes and uses these members. The client might want
268 * to look at them after it returns. */
270 struct ofpbuf
*odp_actions
; /* Datapath actions. */
271 tag_type tags
; /* Tags associated with actions. */
272 enum slow_path_reason slow
; /* 0 if fast path may be used. */
273 bool has_learn
; /* Actions include NXAST_LEARN? */
274 bool has_normal
; /* Actions output to OFPP_NORMAL? */
275 bool has_fin_timeout
; /* Actions include NXAST_FIN_TIMEOUT? */
276 uint16_t nf_output_iface
; /* Output interface index for NetFlow. */
277 mirror_mask_t mirrors
; /* Bitmap of associated mirrors. */
279 /* xlate_actions() initializes and uses these members, but the client has no
280 * reason to look at them. */
282 int recurse
; /* Recursion level, via xlate_table_action. */
283 bool max_resubmit_trigger
; /* Recursed too deeply during translation. */
284 struct flow base_flow
; /* Flow at the last commit. */
285 uint32_t orig_skb_priority
; /* Priority when packet arrived. */
286 uint8_t table_id
; /* OpenFlow table ID where flow was found. */
287 uint32_t sflow_n_outputs
; /* Number of output ports. */
288 uint32_t sflow_odp_port
; /* Output port for composing sFlow action. */
289 uint16_t user_cookie_offset
;/* Used for user_action_cookie fixup. */
290 bool exit
; /* No further actions should be processed. */
293 static void action_xlate_ctx_init(struct action_xlate_ctx
*,
294 struct ofproto_dpif
*, const struct flow
*,
295 ovs_be16 initial_tci
, struct rule_dpif
*,
296 uint8_t tcp_flags
, const struct ofpbuf
*);
297 static void xlate_actions(struct action_xlate_ctx
*,
298 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
299 struct ofpbuf
*odp_actions
);
300 static void xlate_actions_for_side_effects(struct action_xlate_ctx
*,
301 const struct ofpact
*ofpacts
,
303 static void xlate_table_action(struct action_xlate_ctx
*, uint16_t in_port
,
304 uint8_t table_id
, bool may_packet_in
);
306 static size_t put_userspace_action(const struct ofproto_dpif
*,
307 struct ofpbuf
*odp_actions
,
309 const union user_action_cookie
*);
311 static void compose_slow_path(const struct ofproto_dpif
*, const struct flow
*,
312 enum slow_path_reason
,
313 uint64_t *stub
, size_t stub_size
,
314 const struct nlattr
**actionsp
,
315 size_t *actions_lenp
);
317 static void xlate_report(struct action_xlate_ctx
*ctx
, const char *s
);
319 /* A subfacet (see "struct subfacet" below) has three possible installation
322 * - SF_NOT_INSTALLED: Not installed in the datapath. This will only be the
323 * case just after the subfacet is created, just before the subfacet is
324 * destroyed, or if the datapath returns an error when we try to install a
327 * - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
329 * - SF_SLOW_PATH: An action that sends every packet for the subfacet through
330 * ofproto_dpif is installed in the datapath.
333 SF_NOT_INSTALLED
, /* No datapath flow for this subfacet. */
334 SF_FAST_PATH
, /* Full actions are installed. */
335 SF_SLOW_PATH
, /* Send-to-userspace action is installed. */
338 static const char *subfacet_path_to_string(enum subfacet_path
);
340 /* A dpif flow and actions associated with a facet.
342 * See also the large comment on struct facet. */
345 struct hmap_node hmap_node
; /* In struct ofproto_dpif 'subfacets' list. */
346 struct list list_node
; /* In struct facet's 'facets' list. */
347 struct facet
*facet
; /* Owning facet. */
349 enum odp_key_fitness key_fitness
;
353 long long int used
; /* Time last used; time created if not used. */
355 uint64_t dp_packet_count
; /* Last known packet count in the datapath. */
356 uint64_t dp_byte_count
; /* Last known byte count in the datapath. */
360 * These should be essentially identical for every subfacet in a facet, but
361 * may differ in trivial ways due to VLAN splinters. */
362 size_t actions_len
; /* Number of bytes in actions[]. */
363 struct nlattr
*actions
; /* Datapath actions. */
365 enum slow_path_reason slow
; /* 0 if fast path may be used. */
366 enum subfacet_path path
; /* Installed in datapath? */
368 /* This value is normally the same as ->facet->flow.vlan_tci. Only VLAN
369 * splinters can cause it to differ. This value should be removed when
370 * the VLAN splinters feature is no longer needed. */
371 ovs_be16 initial_tci
; /* Initial VLAN TCI value. */
373 /* Datapath port the packet arrived on. This is needed to remove
374 * flows for ports that are no longer part of the bridge. Since the
375 * flow definition only has the OpenFlow port number and the port is
376 * no longer part of the bridge, we can't determine the datapath port
377 * number needed to delete the flow from the datapath. */
378 uint32_t odp_in_port
;
381 #define SUBFACET_DESTROY_MAX_BATCH 50
383 static struct subfacet
*subfacet_create(struct facet
*, struct flow_miss
*miss
,
385 static struct subfacet
*subfacet_find(struct ofproto_dpif
*,
386 const struct nlattr
*key
, size_t key_len
,
388 static void subfacet_destroy(struct subfacet
*);
389 static void subfacet_destroy__(struct subfacet
*);
390 static void subfacet_destroy_batch(struct ofproto_dpif
*,
391 struct subfacet
**, int n
);
392 static void subfacet_reset_dp_stats(struct subfacet
*,
393 struct dpif_flow_stats
*);
394 static void subfacet_update_time(struct subfacet
*, long long int used
);
395 static void subfacet_update_stats(struct subfacet
*,
396 const struct dpif_flow_stats
*);
397 static void subfacet_make_actions(struct subfacet
*,
398 const struct ofpbuf
*packet
,
399 struct ofpbuf
*odp_actions
);
400 static int subfacet_install(struct subfacet
*,
401 const struct nlattr
*actions
, size_t actions_len
,
402 struct dpif_flow_stats
*, enum slow_path_reason
);
403 static void subfacet_uninstall(struct subfacet
*);
405 static enum subfacet_path
subfacet_want_path(enum slow_path_reason
);
407 /* An exact-match instantiation of an OpenFlow flow.
409 * A facet associates a "struct flow", which represents the Open vSwitch
410 * userspace idea of an exact-match flow, with one or more subfacets. Each
411 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
412 * the facet. When the kernel module (or other dpif implementation) and Open
413 * vSwitch userspace agree on the definition of a flow key, there is exactly
414 * one subfacet per facet. If the dpif implementation supports more-specific
415 * flow matching than userspace, however, a facet can have more than one
416 * subfacet, each of which corresponds to some distinction in flow that
417 * userspace simply doesn't understand.
419 * Flow expiration works in terms of subfacets, so a facet must have at least
420 * one subfacet or it will never expire, leaking memory. */
423 struct hmap_node hmap_node
; /* In owning ofproto's 'facets' hmap. */
424 struct list list_node
; /* In owning rule's 'facets' list. */
425 struct rule_dpif
*rule
; /* Owning rule. */
428 struct list subfacets
;
429 long long int used
; /* Time last used; time created if not used. */
436 * - Do include packets and bytes sent "by hand", e.g. with
439 * - Do include packets and bytes that were obtained from the datapath
440 * when a subfacet's statistics were reset (e.g. dpif_flow_put() with
441 * DPIF_FP_ZERO_STATS).
443 * - Do not include packets or bytes that can be obtained from the
444 * datapath for any existing subfacet.
446 uint64_t packet_count
; /* Number of packets received. */
447 uint64_t byte_count
; /* Number of bytes received. */
449 /* Resubmit statistics. */
450 uint64_t prev_packet_count
; /* Number of packets from last stats push. */
451 uint64_t prev_byte_count
; /* Number of bytes from last stats push. */
452 long long int prev_used
; /* Used time from last stats push. */
455 uint64_t accounted_bytes
; /* Bytes processed by facet_account(). */
456 struct netflow_flow nf_flow
; /* Per-flow NetFlow tracking data. */
457 uint8_t tcp_flags
; /* TCP flags seen for this 'rule'. */
459 /* Properties of datapath actions.
461 * Every subfacet has its own actions because actions can differ slightly
462 * between splintered and non-splintered subfacets due to the VLAN tag
463 * being initially different (present vs. absent). All of them have these
464 * properties in common so we just store one copy of them here. */
465 bool has_learn
; /* Actions include NXAST_LEARN? */
466 bool has_normal
; /* Actions output to OFPP_NORMAL? */
467 bool has_fin_timeout
; /* Actions include NXAST_FIN_TIMEOUT? */
468 tag_type tags
; /* Tags that would require revalidation. */
469 mirror_mask_t mirrors
; /* Bitmap of dependent mirrors. */
471 /* Storage for a single subfacet, to reduce malloc() time and space
472 * overhead. (A facet always has at least one subfacet and in the common
473 * case has exactly one subfacet.) */
474 struct subfacet one_subfacet
;
477 static struct facet
*facet_create(struct rule_dpif
*,
478 const struct flow
*, uint32_t hash
);
479 static void facet_remove(struct facet
*);
480 static void facet_free(struct facet
*);
482 static struct facet
*facet_find(struct ofproto_dpif
*,
483 const struct flow
*, uint32_t hash
);
484 static struct facet
*facet_lookup_valid(struct ofproto_dpif
*,
485 const struct flow
*, uint32_t hash
);
486 static void facet_revalidate(struct facet
*);
487 static bool facet_check_consistency(struct facet
*);
489 static void facet_flush_stats(struct facet
*);
491 static void facet_update_time(struct facet
*, long long int used
);
492 static void facet_reset_counters(struct facet
*);
493 static void facet_push_stats(struct facet
*);
494 static void facet_learn(struct facet
*);
495 static void facet_account(struct facet
*);
497 static bool facet_is_controller_flow(struct facet
*);
500 struct hmap_node odp_port_node
; /* In dpif_backer's "odp_to_ofport_map". */
504 struct ofbundle
*bundle
; /* Bundle that contains this port, if any. */
505 struct list bundle_node
; /* In struct ofbundle's "ports" list. */
506 struct cfm
*cfm
; /* Connectivity Fault Management, if any. */
507 tag_type tag
; /* Tag associated with this port. */
508 bool may_enable
; /* May be enabled in bonds. */
509 long long int carrier_seq
; /* Carrier status changes. */
510 struct tnl_port
*tnl_port
; /* Tunnel handle, or null. */
513 struct stp_port
*stp_port
; /* Spanning Tree Protocol, if any. */
514 enum stp_state stp_state
; /* Always STP_DISABLED if STP not in use. */
515 long long int stp_state_entered
;
517 struct hmap priorities
; /* Map of attached 'priority_to_dscp's. */
519 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
521 * This is deprecated. It is only for compatibility with broken device
522 * drivers in old versions of Linux that do not properly support VLANs when
523 * VLAN devices are not used. When broken device drivers are no longer in
524 * widespread use, we will delete these interfaces. */
525 uint16_t realdev_ofp_port
;
529 /* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from
530 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
531 * traffic egressing the 'ofport' with that priority should be marked with. */
532 struct priority_to_dscp
{
533 struct hmap_node hmap_node
; /* Node in 'ofport_dpif''s 'priorities' map. */
534 uint32_t priority
; /* Priority of this queue (see struct flow). */
536 uint8_t dscp
; /* DSCP bits to mark outgoing traffic with. */
539 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
541 * This is deprecated. It is only for compatibility with broken device drivers
542 * in old versions of Linux that do not properly support VLANs when VLAN
543 * devices are not used. When broken device drivers are no longer in
544 * widespread use, we will delete these interfaces. */
545 struct vlan_splinter
{
546 struct hmap_node realdev_vid_node
;
547 struct hmap_node vlandev_node
;
548 uint16_t realdev_ofp_port
;
549 uint16_t vlandev_ofp_port
;
553 static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif
*,
554 uint32_t realdev
, ovs_be16 vlan_tci
);
555 static bool vsp_adjust_flow(const struct ofproto_dpif
*, struct flow
*);
556 static void vsp_remove(struct ofport_dpif
*);
557 static void vsp_add(struct ofport_dpif
*, uint16_t realdev_ofp_port
, int vid
);
559 static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif
*,
561 static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif
*,
564 static struct ofport_dpif
*
565 ofport_dpif_cast(const struct ofport
*ofport
)
567 ovs_assert(ofport
->ofproto
->ofproto_class
== &ofproto_dpif_class
);
568 return ofport
? CONTAINER_OF(ofport
, struct ofport_dpif
, up
) : NULL
;
571 static void port_run(struct ofport_dpif
*);
572 static void port_run_fast(struct ofport_dpif
*);
573 static void port_wait(struct ofport_dpif
*);
574 static int set_cfm(struct ofport
*, const struct cfm_settings
*);
575 static void ofport_clear_priorities(struct ofport_dpif
*);
577 struct dpif_completion
{
578 struct list list_node
;
579 struct ofoperation
*op
;
582 /* Extra information about a classifier table.
583 * Currently used just for optimized flow revalidation. */
585 /* If either of these is nonnull, then this table has a form that allows
586 * flows to be tagged to avoid revalidating most flows for the most common
587 * kinds of flow table changes. */
588 struct cls_table
*catchall_table
; /* Table that wildcards all fields. */
589 struct cls_table
*other_table
; /* Table with any other wildcard set. */
590 uint32_t basis
; /* Keeps each table's tags separate. */
593 /* Reasons that we might need to revalidate every facet, and corresponding
596 * A value of 0 means that there is no need to revalidate.
598 * It would be nice to have some cleaner way to integrate with coverage
599 * counters, but with only a few reasons I guess this is good enough for
601 enum revalidate_reason
{
602 REV_RECONFIGURE
= 1, /* Switch configuration changed. */
603 REV_STP
, /* Spanning tree protocol port status change. */
604 REV_PORT_TOGGLED
, /* Port enabled or disabled by CFM, LACP, ...*/
605 REV_FLOW_TABLE
, /* Flow table changed. */
606 REV_INCONSISTENCY
/* Facet self-check failed. */
608 COVERAGE_DEFINE(rev_reconfigure
);
609 COVERAGE_DEFINE(rev_stp
);
610 COVERAGE_DEFINE(rev_port_toggled
);
611 COVERAGE_DEFINE(rev_flow_table
);
612 COVERAGE_DEFINE(rev_inconsistency
);
614 /* Drop keys are odp flow keys which have drop flows installed in the kernel.
615 * These are datapath flows which have no associated ofproto, if they did we
616 * would use facets. */
618 struct hmap_node hmap_node
;
623 /* All datapaths of a given type share a single dpif backer instance. */
628 struct timer next_expiration
;
629 struct hmap odp_to_ofport_map
; /* ODP port to ofport mapping. */
631 struct simap tnl_backers
; /* Set of dpif ports backing tunnels. */
633 /* Facet revalidation flags applying to facets which use this backer. */
634 enum revalidate_reason need_revalidate
; /* Revalidate every facet. */
635 struct tag_set revalidate_set
; /* Revalidate only matching facets. */
637 struct hmap drop_keys
; /* Set of dropped odp keys. */
640 /* All existing ofproto_backer instances, indexed by ofproto->up.type. */
641 static struct shash all_dpif_backers
= SHASH_INITIALIZER(&all_dpif_backers
);
643 static void drop_key_clear(struct dpif_backer
*);
644 static struct ofport_dpif
*
645 odp_port_to_ofport(const struct dpif_backer
*, uint32_t odp_port
);
647 struct ofproto_dpif
{
648 struct hmap_node all_ofproto_dpifs_node
; /* In 'all_ofproto_dpifs'. */
650 struct dpif_backer
*backer
;
652 /* Special OpenFlow rules. */
653 struct rule_dpif
*miss_rule
; /* Sends flow table misses to controller. */
654 struct rule_dpif
*no_packet_in_rule
; /* Drops flow table misses. */
660 struct netflow
*netflow
;
661 struct dpif_sflow
*sflow
;
662 struct hmap bundles
; /* Contains "struct ofbundle"s. */
663 struct mac_learning
*ml
;
664 struct ofmirror
*mirrors
[MAX_MIRRORS
];
666 bool has_bonded_bundles
;
670 struct hmap subfacets
;
671 struct governor
*governor
;
674 struct table_dpif tables
[N_TABLES
];
676 /* Support for debugging async flow mods. */
677 struct list completions
;
679 bool has_bundle_action
; /* True when the first bundle action appears. */
680 struct netdev_stats stats
; /* To account packets generated and consumed in
685 long long int stp_last_tick
;
687 /* VLAN splinters. */
688 struct hmap realdev_vid_map
; /* (realdev,vid) -> vlandev. */
689 struct hmap vlandev_map
; /* vlandev -> (realdev,vid). */
692 struct sset ports
; /* Set of standard port names. */
693 struct sset ghost_ports
; /* Ports with no datapath port. */
694 struct sset port_poll_set
; /* Queued names for port_poll() reply. */
695 int port_poll_errno
; /* Last errno for port_poll() reply. */
698 /* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
699 * for debugging the asynchronous flow_mod implementation.) */
702 /* All existing ofproto_dpif instances, indexed by ->up.name. */
703 static struct hmap all_ofproto_dpifs
= HMAP_INITIALIZER(&all_ofproto_dpifs
);
705 static void ofproto_dpif_unixctl_init(void);
707 static struct ofproto_dpif
*
708 ofproto_dpif_cast(const struct ofproto
*ofproto
)
710 ovs_assert(ofproto
->ofproto_class
== &ofproto_dpif_class
);
711 return CONTAINER_OF(ofproto
, struct ofproto_dpif
, up
);
714 static struct ofport_dpif
*get_ofp_port(const struct ofproto_dpif
*,
716 static struct ofport_dpif
*get_odp_port(const struct ofproto_dpif
*,
718 static void ofproto_trace(struct ofproto_dpif
*, const struct flow
*,
719 const struct ofpbuf
*, ovs_be16 initial_tci
,
722 /* Packet processing. */
723 static void update_learning_table(struct ofproto_dpif
*,
724 const struct flow
*, int vlan
,
727 #define FLOW_MISS_MAX_BATCH 50
728 static int handle_upcalls(struct dpif_backer
*, unsigned int max_batch
);
730 /* Flow expiration. */
731 static int expire(struct dpif_backer
*);
734 static void send_netflow_active_timeouts(struct ofproto_dpif
*);
737 static int send_packet(const struct ofport_dpif
*, struct ofpbuf
*packet
);
738 static size_t compose_sflow_action(const struct ofproto_dpif
*,
739 struct ofpbuf
*odp_actions
,
740 const struct flow
*, uint32_t odp_port
);
741 static void add_mirror_actions(struct action_xlate_ctx
*ctx
,
742 const struct flow
*flow
);
743 /* Global variables. */
744 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
746 /* Initial mappings of port to bridge mappings. */
747 static struct shash init_ofp_ports
= SHASH_INITIALIZER(&init_ofp_ports
);
749 /* Factory functions. */
752 init(const struct shash
*iface_hints
)
754 struct shash_node
*node
;
756 /* Make a local copy, since we don't own 'iface_hints' elements. */
757 SHASH_FOR_EACH(node
, iface_hints
) {
758 const struct iface_hint
*orig_hint
= node
->data
;
759 struct iface_hint
*new_hint
= xmalloc(sizeof *new_hint
);
761 new_hint
->br_name
= xstrdup(orig_hint
->br_name
);
762 new_hint
->br_type
= xstrdup(orig_hint
->br_type
);
763 new_hint
->ofp_port
= orig_hint
->ofp_port
;
765 shash_add(&init_ofp_ports
, node
->name
, new_hint
);
770 enumerate_types(struct sset
*types
)
772 dp_enumerate_types(types
);
776 enumerate_names(const char *type
, struct sset
*names
)
778 struct ofproto_dpif
*ofproto
;
781 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
782 if (strcmp(type
, ofproto
->up
.type
)) {
785 sset_add(names
, ofproto
->up
.name
);
792 del(const char *type
, const char *name
)
797 error
= dpif_open(name
, type
, &dpif
);
799 error
= dpif_delete(dpif
);
806 port_open_type(const char *datapath_type
, const char *port_type
)
808 return dpif_port_open_type(datapath_type
, port_type
);
811 /* Type functions. */
813 static struct ofproto_dpif
*
814 lookup_ofproto_dpif_by_port_name(const char *name
)
816 struct ofproto_dpif
*ofproto
;
818 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
819 if (sset_contains(&ofproto
->ports
, name
)) {
828 type_run(const char *type
)
830 struct dpif_backer
*backer
;
834 backer
= shash_find_data(&all_dpif_backers
, type
);
836 /* This is not necessarily a problem, since backers are only
837 * created on demand. */
841 dpif_run(backer
->dpif
);
843 if (backer
->need_revalidate
844 || !tag_set_is_empty(&backer
->revalidate_set
)) {
845 struct tag_set revalidate_set
= backer
->revalidate_set
;
846 bool need_revalidate
= backer
->need_revalidate
;
847 struct ofproto_dpif
*ofproto
;
848 struct simap_node
*node
;
849 struct simap tmp_backers
;
851 /* Handle tunnel garbage collection. */
852 simap_init(&tmp_backers
);
853 simap_swap(&backer
->tnl_backers
, &tmp_backers
);
855 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
856 struct ofport_dpif
*iter
;
858 if (backer
!= ofproto
->backer
) {
862 HMAP_FOR_EACH (iter
, up
.hmap_node
, &ofproto
->up
.ports
) {
865 if (!iter
->tnl_port
) {
869 dp_port
= netdev_vport_get_dpif_port(iter
->up
.netdev
);
870 node
= simap_find(&tmp_backers
, dp_port
);
872 simap_put(&backer
->tnl_backers
, dp_port
, node
->data
);
873 simap_delete(&tmp_backers
, node
);
874 node
= simap_find(&backer
->tnl_backers
, dp_port
);
876 node
= simap_find(&backer
->tnl_backers
, dp_port
);
878 uint32_t odp_port
= UINT32_MAX
;
880 if (!dpif_port_add(backer
->dpif
, iter
->up
.netdev
,
882 simap_put(&backer
->tnl_backers
, dp_port
, odp_port
);
883 node
= simap_find(&backer
->tnl_backers
, dp_port
);
888 iter
->odp_port
= node
? node
->data
: OVSP_NONE
;
889 if (tnl_port_reconfigure(&iter
->up
, iter
->odp_port
,
891 backer
->need_revalidate
= REV_RECONFIGURE
;
896 SIMAP_FOR_EACH (node
, &tmp_backers
) {
897 dpif_port_del(backer
->dpif
, node
->data
);
899 simap_destroy(&tmp_backers
);
901 switch (backer
->need_revalidate
) {
902 case REV_RECONFIGURE
: COVERAGE_INC(rev_reconfigure
); break;
903 case REV_STP
: COVERAGE_INC(rev_stp
); break;
904 case REV_PORT_TOGGLED
: COVERAGE_INC(rev_port_toggled
); break;
905 case REV_FLOW_TABLE
: COVERAGE_INC(rev_flow_table
); break;
906 case REV_INCONSISTENCY
: COVERAGE_INC(rev_inconsistency
); break;
909 if (backer
->need_revalidate
) {
910 /* Clear the drop_keys in case we should now be accepting some
911 * formerly dropped flows. */
912 drop_key_clear(backer
);
915 /* Clear the revalidation flags. */
916 tag_set_init(&backer
->revalidate_set
);
917 backer
->need_revalidate
= 0;
919 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
920 struct facet
*facet
, *next
;
922 if (ofproto
->backer
!= backer
) {
926 HMAP_FOR_EACH_SAFE (facet
, next
, hmap_node
, &ofproto
->facets
) {
928 || tag_set_intersects(&revalidate_set
, facet
->tags
)) {
929 facet_revalidate(facet
);
935 if (timer_expired(&backer
->next_expiration
)) {
936 int delay
= expire(backer
);
937 timer_set_duration(&backer
->next_expiration
, delay
);
940 /* Check for port changes in the dpif. */
941 while ((error
= dpif_port_poll(backer
->dpif
, &devname
)) == 0) {
942 struct ofproto_dpif
*ofproto
;
943 struct dpif_port port
;
945 /* Don't report on the datapath's device. */
946 if (!strcmp(devname
, dpif_base_name(backer
->dpif
))) {
950 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
,
951 &all_ofproto_dpifs
) {
952 if (simap_contains(&ofproto
->backer
->tnl_backers
, devname
)) {
957 ofproto
= lookup_ofproto_dpif_by_port_name(devname
);
958 if (dpif_port_query_by_name(backer
->dpif
, devname
, &port
)) {
959 /* The port was removed. If we know the datapath,
960 * report it through poll_set(). If we don't, it may be
961 * notifying us of a removal we initiated, so ignore it.
962 * If there's a pending ENOBUFS, let it stand, since
963 * everything will be reevaluated. */
964 if (ofproto
&& ofproto
->port_poll_errno
!= ENOBUFS
) {
965 sset_add(&ofproto
->port_poll_set
, devname
);
966 ofproto
->port_poll_errno
= 0;
968 } else if (!ofproto
) {
969 /* The port was added, but we don't know with which
970 * ofproto we should associate it. Delete it. */
971 dpif_port_del(backer
->dpif
, port
.port_no
);
973 dpif_port_destroy(&port
);
979 if (error
!= EAGAIN
) {
980 struct ofproto_dpif
*ofproto
;
982 /* There was some sort of error, so propagate it to all
983 * ofprotos that use this backer. */
984 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
,
985 &all_ofproto_dpifs
) {
986 if (ofproto
->backer
== backer
) {
987 sset_clear(&ofproto
->port_poll_set
);
988 ofproto
->port_poll_errno
= error
;
997 type_run_fast(const char *type
)
999 struct dpif_backer
*backer
;
1002 backer
= shash_find_data(&all_dpif_backers
, type
);
1004 /* This is not necessarily a problem, since backers are only
1005 * created on demand. */
1009 /* Handle one or more batches of upcalls, until there's nothing left to do
1010 * or until we do a fixed total amount of work.
1012 * We do work in batches because it can be much cheaper to set up a number
1013 * of flows and fire off their patches all at once. We do multiple batches
1014 * because in some cases handling a packet can cause another packet to be
1015 * queued almost immediately as part of the return flow. Both
1016 * optimizations can make major improvements on some benchmarks and
1017 * presumably for real traffic as well. */
1019 while (work
< FLOW_MISS_MAX_BATCH
) {
1020 int retval
= handle_upcalls(backer
, FLOW_MISS_MAX_BATCH
- work
);
1031 type_wait(const char *type
)
1033 struct dpif_backer
*backer
;
1035 backer
= shash_find_data(&all_dpif_backers
, type
);
1037 /* This is not necessarily a problem, since backers are only
1038 * created on demand. */
1042 timer_wait(&backer
->next_expiration
);
1045 /* Basic life-cycle. */
1047 static int add_internal_flows(struct ofproto_dpif
*);
1049 static struct ofproto
*
1052 struct ofproto_dpif
*ofproto
= xmalloc(sizeof *ofproto
);
1053 return &ofproto
->up
;
1057 dealloc(struct ofproto
*ofproto_
)
1059 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1064 close_dpif_backer(struct dpif_backer
*backer
)
1066 struct shash_node
*node
;
1068 ovs_assert(backer
->refcount
> 0);
1070 if (--backer
->refcount
) {
1074 drop_key_clear(backer
);
1075 hmap_destroy(&backer
->drop_keys
);
1077 simap_destroy(&backer
->tnl_backers
);
1078 hmap_destroy(&backer
->odp_to_ofport_map
);
1079 node
= shash_find(&all_dpif_backers
, backer
->type
);
1081 shash_delete(&all_dpif_backers
, node
);
1082 dpif_close(backer
->dpif
);
1087 /* Datapath port slated for removal from datapath. */
1088 struct odp_garbage
{
1089 struct list list_node
;
1094 open_dpif_backer(const char *type
, struct dpif_backer
**backerp
)
1096 struct dpif_backer
*backer
;
1097 struct dpif_port_dump port_dump
;
1098 struct dpif_port port
;
1099 struct shash_node
*node
;
1100 struct list garbage_list
;
1101 struct odp_garbage
*garbage
, *next
;
1107 backer
= shash_find_data(&all_dpif_backers
, type
);
1114 backer_name
= xasprintf("ovs-%s", type
);
1116 /* Remove any existing datapaths, since we assume we're the only
1117 * userspace controlling the datapath. */
1119 dp_enumerate_names(type
, &names
);
1120 SSET_FOR_EACH(name
, &names
) {
1121 struct dpif
*old_dpif
;
1123 /* Don't remove our backer if it exists. */
1124 if (!strcmp(name
, backer_name
)) {
1128 if (dpif_open(name
, type
, &old_dpif
)) {
1129 VLOG_WARN("couldn't open old datapath %s to remove it", name
);
1131 dpif_delete(old_dpif
);
1132 dpif_close(old_dpif
);
1135 sset_destroy(&names
);
1137 backer
= xmalloc(sizeof *backer
);
1139 error
= dpif_create_and_open(backer_name
, type
, &backer
->dpif
);
1142 VLOG_ERR("failed to open datapath of type %s: %s", type
,
1148 backer
->type
= xstrdup(type
);
1149 backer
->refcount
= 1;
1150 hmap_init(&backer
->odp_to_ofport_map
);
1151 hmap_init(&backer
->drop_keys
);
1152 timer_set_duration(&backer
->next_expiration
, 1000);
1153 backer
->need_revalidate
= 0;
1154 simap_init(&backer
->tnl_backers
);
1155 tag_set_init(&backer
->revalidate_set
);
1158 dpif_flow_flush(backer
->dpif
);
1160 /* Loop through the ports already on the datapath and remove any
1161 * that we don't need anymore. */
1162 list_init(&garbage_list
);
1163 dpif_port_dump_start(&port_dump
, backer
->dpif
);
1164 while (dpif_port_dump_next(&port_dump
, &port
)) {
1165 node
= shash_find(&init_ofp_ports
, port
.name
);
1166 if (!node
&& strcmp(port
.name
, dpif_base_name(backer
->dpif
))) {
1167 garbage
= xmalloc(sizeof *garbage
);
1168 garbage
->odp_port
= port
.port_no
;
1169 list_push_front(&garbage_list
, &garbage
->list_node
);
1172 dpif_port_dump_done(&port_dump
);
1174 LIST_FOR_EACH_SAFE (garbage
, next
, list_node
, &garbage_list
) {
1175 dpif_port_del(backer
->dpif
, garbage
->odp_port
);
1176 list_remove(&garbage
->list_node
);
1180 shash_add(&all_dpif_backers
, type
, backer
);
1182 error
= dpif_recv_set(backer
->dpif
, true);
1184 VLOG_ERR("failed to listen on datapath of type %s: %s",
1185 type
, strerror(error
));
1186 close_dpif_backer(backer
);
1194 construct(struct ofproto
*ofproto_
)
1196 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1197 struct shash_node
*node
, *next
;
1202 error
= open_dpif_backer(ofproto
->up
.type
, &ofproto
->backer
);
1207 max_ports
= dpif_get_max_ports(ofproto
->backer
->dpif
);
1208 ofproto_init_max_ports(ofproto_
, MIN(max_ports
, OFPP_MAX
));
1210 ofproto
->n_matches
= 0;
1212 ofproto
->netflow
= NULL
;
1213 ofproto
->sflow
= NULL
;
1214 ofproto
->stp
= NULL
;
1215 hmap_init(&ofproto
->bundles
);
1216 ofproto
->ml
= mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME
);
1217 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1218 ofproto
->mirrors
[i
] = NULL
;
1220 ofproto
->has_bonded_bundles
= false;
1222 hmap_init(&ofproto
->facets
);
1223 hmap_init(&ofproto
->subfacets
);
1224 ofproto
->governor
= NULL
;
1226 for (i
= 0; i
< N_TABLES
; i
++) {
1227 struct table_dpif
*table
= &ofproto
->tables
[i
];
1229 table
->catchall_table
= NULL
;
1230 table
->other_table
= NULL
;
1231 table
->basis
= random_uint32();
1234 list_init(&ofproto
->completions
);
1236 ofproto_dpif_unixctl_init();
1238 ofproto
->has_mirrors
= false;
1239 ofproto
->has_bundle_action
= false;
1241 hmap_init(&ofproto
->vlandev_map
);
1242 hmap_init(&ofproto
->realdev_vid_map
);
1244 sset_init(&ofproto
->ports
);
1245 sset_init(&ofproto
->ghost_ports
);
1246 sset_init(&ofproto
->port_poll_set
);
1247 ofproto
->port_poll_errno
= 0;
1249 SHASH_FOR_EACH_SAFE (node
, next
, &init_ofp_ports
) {
1250 struct iface_hint
*iface_hint
= node
->data
;
1252 if (!strcmp(iface_hint
->br_name
, ofproto
->up
.name
)) {
1253 /* Check if the datapath already has this port. */
1254 if (dpif_port_exists(ofproto
->backer
->dpif
, node
->name
)) {
1255 sset_add(&ofproto
->ports
, node
->name
);
1258 free(iface_hint
->br_name
);
1259 free(iface_hint
->br_type
);
1261 shash_delete(&init_ofp_ports
, node
);
1265 hmap_insert(&all_ofproto_dpifs
, &ofproto
->all_ofproto_dpifs_node
,
1266 hash_string(ofproto
->up
.name
, 0));
1267 memset(&ofproto
->stats
, 0, sizeof ofproto
->stats
);
1269 ofproto_init_tables(ofproto_
, N_TABLES
);
1270 error
= add_internal_flows(ofproto
);
1271 ofproto
->up
.tables
[TBL_INTERNAL
].flags
= OFTABLE_HIDDEN
| OFTABLE_READONLY
;
1277 add_internal_flow(struct ofproto_dpif
*ofproto
, int id
,
1278 const struct ofpbuf
*ofpacts
, struct rule_dpif
**rulep
)
1280 struct ofputil_flow_mod fm
;
1283 match_init_catchall(&fm
.match
);
1285 match_set_reg(&fm
.match
, 0, id
);
1286 fm
.new_cookie
= htonll(0);
1287 fm
.cookie
= htonll(0);
1288 fm
.cookie_mask
= htonll(0);
1289 fm
.table_id
= TBL_INTERNAL
;
1290 fm
.command
= OFPFC_ADD
;
1291 fm
.idle_timeout
= 0;
1292 fm
.hard_timeout
= 0;
1296 fm
.ofpacts
= ofpacts
->data
;
1297 fm
.ofpacts_len
= ofpacts
->size
;
1299 error
= ofproto_flow_mod(&ofproto
->up
, &fm
);
1301 VLOG_ERR_RL(&rl
, "failed to add internal flow %d (%s)",
1302 id
, ofperr_to_string(error
));
1306 *rulep
= rule_dpif_lookup__(ofproto
, &fm
.match
.flow
, TBL_INTERNAL
);
1307 ovs_assert(*rulep
!= NULL
);
1313 add_internal_flows(struct ofproto_dpif
*ofproto
)
1315 struct ofpact_controller
*controller
;
1316 uint64_t ofpacts_stub
[128 / 8];
1317 struct ofpbuf ofpacts
;
1321 ofpbuf_use_stack(&ofpacts
, ofpacts_stub
, sizeof ofpacts_stub
);
1324 controller
= ofpact_put_CONTROLLER(&ofpacts
);
1325 controller
->max_len
= UINT16_MAX
;
1326 controller
->controller_id
= 0;
1327 controller
->reason
= OFPR_NO_MATCH
;
1328 ofpact_pad(&ofpacts
);
1330 error
= add_internal_flow(ofproto
, id
++, &ofpacts
, &ofproto
->miss_rule
);
1335 ofpbuf_clear(&ofpacts
);
1336 error
= add_internal_flow(ofproto
, id
++, &ofpacts
,
1337 &ofproto
->no_packet_in_rule
);
1342 complete_operations(struct ofproto_dpif
*ofproto
)
1344 struct dpif_completion
*c
, *next
;
1346 LIST_FOR_EACH_SAFE (c
, next
, list_node
, &ofproto
->completions
) {
1347 ofoperation_complete(c
->op
, 0);
1348 list_remove(&c
->list_node
);
1354 destruct(struct ofproto
*ofproto_
)
1356 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1357 struct rule_dpif
*rule
, *next_rule
;
1358 struct oftable
*table
;
1361 hmap_remove(&all_ofproto_dpifs
, &ofproto
->all_ofproto_dpifs_node
);
1362 complete_operations(ofproto
);
1364 OFPROTO_FOR_EACH_TABLE (table
, &ofproto
->up
) {
1365 struct cls_cursor cursor
;
1367 cls_cursor_init(&cursor
, &table
->cls
, NULL
);
1368 CLS_CURSOR_FOR_EACH_SAFE (rule
, next_rule
, up
.cr
, &cursor
) {
1369 ofproto_rule_destroy(&rule
->up
);
1373 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1374 mirror_destroy(ofproto
->mirrors
[i
]);
1377 netflow_destroy(ofproto
->netflow
);
1378 dpif_sflow_destroy(ofproto
->sflow
);
1379 hmap_destroy(&ofproto
->bundles
);
1380 mac_learning_destroy(ofproto
->ml
);
1382 hmap_destroy(&ofproto
->facets
);
1383 hmap_destroy(&ofproto
->subfacets
);
1384 governor_destroy(ofproto
->governor
);
1386 hmap_destroy(&ofproto
->vlandev_map
);
1387 hmap_destroy(&ofproto
->realdev_vid_map
);
1389 sset_destroy(&ofproto
->ports
);
1390 sset_destroy(&ofproto
->ghost_ports
);
1391 sset_destroy(&ofproto
->port_poll_set
);
1393 close_dpif_backer(ofproto
->backer
);
1397 run_fast(struct ofproto
*ofproto_
)
1399 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1400 struct ofport_dpif
*ofport
;
1402 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1403 port_run_fast(ofport
);
1410 run(struct ofproto
*ofproto_
)
1412 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1413 struct ofport_dpif
*ofport
;
1414 struct ofbundle
*bundle
;
1418 complete_operations(ofproto
);
1421 error
= run_fast(ofproto_
);
1426 if (ofproto
->netflow
) {
1427 if (netflow_run(ofproto
->netflow
)) {
1428 send_netflow_active_timeouts(ofproto
);
1431 if (ofproto
->sflow
) {
1432 dpif_sflow_run(ofproto
->sflow
);
1435 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1438 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1443 mac_learning_run(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
1445 /* Check the consistency of a random facet, to aid debugging. */
1446 if (!hmap_is_empty(&ofproto
->facets
)
1447 && !ofproto
->backer
->need_revalidate
) {
1448 struct facet
*facet
;
1450 facet
= CONTAINER_OF(hmap_random_node(&ofproto
->facets
),
1451 struct facet
, hmap_node
);
1452 if (!tag_set_intersects(&ofproto
->backer
->revalidate_set
,
1454 if (!facet_check_consistency(facet
)) {
1455 ofproto
->backer
->need_revalidate
= REV_INCONSISTENCY
;
1460 if (ofproto
->governor
) {
1463 governor_run(ofproto
->governor
);
1465 /* If the governor has shrunk to its minimum size and the number of
1466 * subfacets has dwindled, then drop the governor entirely.
1468 * For hysteresis, the number of subfacets to drop the governor is
1469 * smaller than the number needed to trigger its creation. */
1470 n_subfacets
= hmap_count(&ofproto
->subfacets
);
1471 if (n_subfacets
* 4 < ofproto
->up
.flow_eviction_threshold
1472 && governor_is_idle(ofproto
->governor
)) {
1473 governor_destroy(ofproto
->governor
);
1474 ofproto
->governor
= NULL
;
1482 wait(struct ofproto
*ofproto_
)
1484 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1485 struct ofport_dpif
*ofport
;
1486 struct ofbundle
*bundle
;
1488 if (!clogged
&& !list_is_empty(&ofproto
->completions
)) {
1489 poll_immediate_wake();
1492 dpif_wait(ofproto
->backer
->dpif
);
1493 dpif_recv_wait(ofproto
->backer
->dpif
);
1494 if (ofproto
->sflow
) {
1495 dpif_sflow_wait(ofproto
->sflow
);
1497 if (!tag_set_is_empty(&ofproto
->backer
->revalidate_set
)) {
1498 poll_immediate_wake();
1500 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1503 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1504 bundle_wait(bundle
);
1506 if (ofproto
->netflow
) {
1507 netflow_wait(ofproto
->netflow
);
1509 mac_learning_wait(ofproto
->ml
);
1511 if (ofproto
->backer
->need_revalidate
) {
1512 /* Shouldn't happen, but if it does just go around again. */
1513 VLOG_DBG_RL(&rl
, "need revalidate in ofproto_wait_cb()");
1514 poll_immediate_wake();
1516 if (ofproto
->governor
) {
1517 governor_wait(ofproto
->governor
);
1522 get_memory_usage(const struct ofproto
*ofproto_
, struct simap
*usage
)
1524 const struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1526 simap_increase(usage
, "facets", hmap_count(&ofproto
->facets
));
1527 simap_increase(usage
, "subfacets", hmap_count(&ofproto
->subfacets
));
1531 flush(struct ofproto
*ofproto_
)
1533 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1534 struct subfacet
*subfacet
, *next_subfacet
;
1535 struct subfacet
*batch
[SUBFACET_DESTROY_MAX_BATCH
];
1539 HMAP_FOR_EACH_SAFE (subfacet
, next_subfacet
, hmap_node
,
1540 &ofproto
->subfacets
) {
1541 if (subfacet
->path
!= SF_NOT_INSTALLED
) {
1542 batch
[n_batch
++] = subfacet
;
1543 if (n_batch
>= SUBFACET_DESTROY_MAX_BATCH
) {
1544 subfacet_destroy_batch(ofproto
, batch
, n_batch
);
1548 subfacet_destroy(subfacet
);
1553 subfacet_destroy_batch(ofproto
, batch
, n_batch
);
1558 get_features(struct ofproto
*ofproto_ OVS_UNUSED
,
1559 bool *arp_match_ip
, enum ofputil_action_bitmap
*actions
)
1561 *arp_match_ip
= true;
1562 *actions
= (OFPUTIL_A_OUTPUT
|
1563 OFPUTIL_A_SET_VLAN_VID
|
1564 OFPUTIL_A_SET_VLAN_PCP
|
1565 OFPUTIL_A_STRIP_VLAN
|
1566 OFPUTIL_A_SET_DL_SRC
|
1567 OFPUTIL_A_SET_DL_DST
|
1568 OFPUTIL_A_SET_NW_SRC
|
1569 OFPUTIL_A_SET_NW_DST
|
1570 OFPUTIL_A_SET_NW_TOS
|
1571 OFPUTIL_A_SET_TP_SRC
|
1572 OFPUTIL_A_SET_TP_DST
|
1577 get_tables(struct ofproto
*ofproto_
, struct ofp12_table_stats
*ots
)
1579 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1580 struct dpif_dp_stats s
;
1582 strcpy(ots
->name
, "classifier");
1584 dpif_get_dp_stats(ofproto
->backer
->dpif
, &s
);
1586 ots
->lookup_count
= htonll(s
.n_hit
+ s
.n_missed
);
1587 ots
->matched_count
= htonll(s
.n_hit
+ ofproto
->n_matches
);
1590 static struct ofport
*
1593 struct ofport_dpif
*port
= xmalloc(sizeof *port
);
1598 port_dealloc(struct ofport
*port_
)
1600 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1605 port_construct(struct ofport
*port_
)
1607 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1608 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1609 const struct netdev
*netdev
= port
->up
.netdev
;
1610 struct dpif_port dpif_port
;
1613 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1614 port
->bundle
= NULL
;
1616 port
->tag
= tag_create_random();
1617 port
->may_enable
= true;
1618 port
->stp_port
= NULL
;
1619 port
->stp_state
= STP_DISABLED
;
1620 port
->tnl_port
= NULL
;
1621 hmap_init(&port
->priorities
);
1622 port
->realdev_ofp_port
= 0;
1623 port
->vlandev_vid
= 0;
1624 port
->carrier_seq
= netdev_get_carrier_resets(netdev
);
1626 if (netdev_vport_is_patch(netdev
)) {
1627 /* XXX By bailing out here, we don't do required sFlow work. */
1628 port
->odp_port
= OVSP_NONE
;
1632 error
= dpif_port_query_by_name(ofproto
->backer
->dpif
,
1633 netdev_vport_get_dpif_port(netdev
),
1639 port
->odp_port
= dpif_port
.port_no
;
1641 if (netdev_get_tunnel_config(netdev
)) {
1642 port
->tnl_port
= tnl_port_add(&port
->up
, port
->odp_port
);
1644 /* Sanity-check that a mapping doesn't already exist. This
1645 * shouldn't happen for non-tunnel ports. */
1646 if (odp_port_to_ofp_port(ofproto
, port
->odp_port
) != OFPP_NONE
) {
1647 VLOG_ERR("port %s already has an OpenFlow port number",
1649 dpif_port_destroy(&dpif_port
);
1653 hmap_insert(&ofproto
->backer
->odp_to_ofport_map
, &port
->odp_port_node
,
1654 hash_int(port
->odp_port
, 0));
1656 dpif_port_destroy(&dpif_port
);
1658 if (ofproto
->sflow
) {
1659 dpif_sflow_add_port(ofproto
->sflow
, port_
, port
->odp_port
);
1666 port_destruct(struct ofport
*port_
)
1668 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1669 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1670 const char *dp_port_name
= netdev_vport_get_dpif_port(port
->up
.netdev
);
1671 const char *devname
= netdev_get_name(port
->up
.netdev
);
1673 if (dpif_port_exists(ofproto
->backer
->dpif
, dp_port_name
)) {
1674 /* The underlying device is still there, so delete it. This
1675 * happens when the ofproto is being destroyed, since the caller
1676 * assumes that removal of attached ports will happen as part of
1678 if (!port
->tnl_port
) {
1679 dpif_port_del(ofproto
->backer
->dpif
, port
->odp_port
);
1681 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1684 if (port
->odp_port
!= OVSP_NONE
&& !port
->tnl_port
) {
1685 hmap_remove(&ofproto
->backer
->odp_to_ofport_map
, &port
->odp_port_node
);
1688 tnl_port_del(port
->tnl_port
);
1689 sset_find_and_delete(&ofproto
->ports
, devname
);
1690 sset_find_and_delete(&ofproto
->ghost_ports
, devname
);
1691 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1692 bundle_remove(port_
);
1693 set_cfm(port_
, NULL
);
1694 if (ofproto
->sflow
) {
1695 dpif_sflow_del_port(ofproto
->sflow
, port
->odp_port
);
1698 ofport_clear_priorities(port
);
1699 hmap_destroy(&port
->priorities
);
1703 port_modified(struct ofport
*port_
)
1705 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1707 if (port
->bundle
&& port
->bundle
->bond
) {
1708 bond_slave_set_netdev(port
->bundle
->bond
, port
, port
->up
.netdev
);
1713 port_reconfigured(struct ofport
*port_
, enum ofputil_port_config old_config
)
1715 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1716 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1717 enum ofputil_port_config changed
= old_config
^ port
->up
.pp
.config
;
1719 if (changed
& (OFPUTIL_PC_NO_RECV
| OFPUTIL_PC_NO_RECV_STP
|
1720 OFPUTIL_PC_NO_FWD
| OFPUTIL_PC_NO_FLOOD
|
1721 OFPUTIL_PC_NO_PACKET_IN
)) {
1722 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1724 if (changed
& OFPUTIL_PC_NO_FLOOD
&& port
->bundle
) {
1725 bundle_update(port
->bundle
);
1731 set_sflow(struct ofproto
*ofproto_
,
1732 const struct ofproto_sflow_options
*sflow_options
)
1734 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1735 struct dpif_sflow
*ds
= ofproto
->sflow
;
1737 if (sflow_options
) {
1739 struct ofport_dpif
*ofport
;
1741 ds
= ofproto
->sflow
= dpif_sflow_create();
1742 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1743 dpif_sflow_add_port(ds
, &ofport
->up
, ofport
->odp_port
);
1745 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1747 dpif_sflow_set_options(ds
, sflow_options
);
1750 dpif_sflow_destroy(ds
);
1751 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1752 ofproto
->sflow
= NULL
;
1759 set_cfm(struct ofport
*ofport_
, const struct cfm_settings
*s
)
1761 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1768 struct ofproto_dpif
*ofproto
;
1770 ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1771 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1772 ofport
->cfm
= cfm_create(netdev_get_name(ofport
->up
.netdev
));
1775 if (cfm_configure(ofport
->cfm
, s
)) {
1781 cfm_destroy(ofport
->cfm
);
1787 get_cfm_status(const struct ofport
*ofport_
,
1788 struct ofproto_cfm_status
*status
)
1790 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1793 status
->faults
= cfm_get_fault(ofport
->cfm
);
1794 status
->remote_opstate
= cfm_get_opup(ofport
->cfm
);
1795 status
->health
= cfm_get_health(ofport
->cfm
);
1796 cfm_get_remote_mpids(ofport
->cfm
, &status
->rmps
, &status
->n_rmps
);
1803 /* Spanning Tree. */
1806 send_bpdu_cb(struct ofpbuf
*pkt
, int port_num
, void *ofproto_
)
1808 struct ofproto_dpif
*ofproto
= ofproto_
;
1809 struct stp_port
*sp
= stp_get_port(ofproto
->stp
, port_num
);
1810 struct ofport_dpif
*ofport
;
1812 ofport
= stp_port_get_aux(sp
);
1814 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on unknown port %d",
1815 ofproto
->up
.name
, port_num
);
1817 struct eth_header
*eth
= pkt
->l2
;
1819 netdev_get_etheraddr(ofport
->up
.netdev
, eth
->eth_src
);
1820 if (eth_addr_is_zero(eth
->eth_src
)) {
1821 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on port %d "
1822 "with unknown MAC", ofproto
->up
.name
, port_num
);
1824 send_packet(ofport
, pkt
);
1830 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
1832 set_stp(struct ofproto
*ofproto_
, const struct ofproto_stp_settings
*s
)
1834 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1836 /* Only revalidate flows if the configuration changed. */
1837 if (!s
!= !ofproto
->stp
) {
1838 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1842 if (!ofproto
->stp
) {
1843 ofproto
->stp
= stp_create(ofproto_
->name
, s
->system_id
,
1844 send_bpdu_cb
, ofproto
);
1845 ofproto
->stp_last_tick
= time_msec();
1848 stp_set_bridge_id(ofproto
->stp
, s
->system_id
);
1849 stp_set_bridge_priority(ofproto
->stp
, s
->priority
);
1850 stp_set_hello_time(ofproto
->stp
, s
->hello_time
);
1851 stp_set_max_age(ofproto
->stp
, s
->max_age
);
1852 stp_set_forward_delay(ofproto
->stp
, s
->fwd_delay
);
1854 struct ofport
*ofport
;
1856 HMAP_FOR_EACH (ofport
, hmap_node
, &ofproto
->up
.ports
) {
1857 set_stp_port(ofport
, NULL
);
1860 stp_destroy(ofproto
->stp
);
1861 ofproto
->stp
= NULL
;
1868 get_stp_status(struct ofproto
*ofproto_
, struct ofproto_stp_status
*s
)
1870 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1874 s
->bridge_id
= stp_get_bridge_id(ofproto
->stp
);
1875 s
->designated_root
= stp_get_designated_root(ofproto
->stp
);
1876 s
->root_path_cost
= stp_get_root_path_cost(ofproto
->stp
);
1885 update_stp_port_state(struct ofport_dpif
*ofport
)
1887 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1888 enum stp_state state
;
1890 /* Figure out new state. */
1891 state
= ofport
->stp_port
? stp_port_get_state(ofport
->stp_port
)
1895 if (ofport
->stp_state
!= state
) {
1896 enum ofputil_port_state of_state
;
1899 VLOG_DBG_RL(&rl
, "port %s: STP state changed from %s to %s",
1900 netdev_get_name(ofport
->up
.netdev
),
1901 stp_state_name(ofport
->stp_state
),
1902 stp_state_name(state
));
1903 if (stp_learn_in_state(ofport
->stp_state
)
1904 != stp_learn_in_state(state
)) {
1905 /* xxx Learning action flows should also be flushed. */
1906 mac_learning_flush(ofproto
->ml
,
1907 &ofproto
->backer
->revalidate_set
);
1909 fwd_change
= stp_forward_in_state(ofport
->stp_state
)
1910 != stp_forward_in_state(state
);
1912 ofproto
->backer
->need_revalidate
= REV_STP
;
1913 ofport
->stp_state
= state
;
1914 ofport
->stp_state_entered
= time_msec();
1916 if (fwd_change
&& ofport
->bundle
) {
1917 bundle_update(ofport
->bundle
);
1920 /* Update the STP state bits in the OpenFlow port description. */
1921 of_state
= ofport
->up
.pp
.state
& ~OFPUTIL_PS_STP_MASK
;
1922 of_state
|= (state
== STP_LISTENING
? OFPUTIL_PS_STP_LISTEN
1923 : state
== STP_LEARNING
? OFPUTIL_PS_STP_LEARN
1924 : state
== STP_FORWARDING
? OFPUTIL_PS_STP_FORWARD
1925 : state
== STP_BLOCKING
? OFPUTIL_PS_STP_BLOCK
1927 ofproto_port_set_state(&ofport
->up
, of_state
);
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}
static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}
static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }
}

static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
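/* Why the trimming above is needed: BPDUs are 802.3/LLC frames, so
 * eth->eth_type holds a length rather than an Ethertype, and minimum-size
 * Ethernet frames may carry trailing padding beyond that length.  Dropping
 * the padding (and then the Ethernet and LLC headers) leaves
 * stp_received_bpdu() with just the BPDU octets. */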
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}

static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}
static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
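/* Illustrative example for the DSCP computation in set_queues(): a queue
 * configured with qdscp_list[i].dscp == 10 (AF11) yields
 * dscp = (10 << 2) & IP_DSCP_MASK == 0x28, i.e. the 6-bit DSCP value shifted
 * into the upper bits of the IP TOS byte. */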
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}
static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}
static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    return true;
}
static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}
static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    ovs_assert(s->n_slaves == 1 || s->bond != NULL);
    ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    ovs_assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = CONST_CAST(unsigned long *, s->trunks);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = CONST_CAST(unsigned long *, s->trunks);
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}
static int
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }

    return error;
}
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}

static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}

/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
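/* Worked example for mirror_update_dups(): if mirrors 0 and 2 share the same
 * 'out' bundle and 'out_vlan', the loops above leave both with
 * dup_mirrors == (MIRROR_MASK_C(1) << 0) | (MIRROR_MASK_C(1) << 2), so a
 * packet already mirrored on behalf of one of them need not be duplicated
 * for the other. */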
static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml,
                       &ofproto->backer->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}
static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}
static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}

static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    }
    return 0;
}

static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}

static void
set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
                     size_t max_entries)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
    mac_learning_set_max_entries(ofproto->ml, max_entries);
}
static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
    return port && &ofproto->up == port->up.ofproto ? port : NULL;
}

static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}

static struct ofport_dpif *
ofport_get_peer(const struct ofport_dpif *ofport_dpif)
{
    const struct ofproto_dpif *ofproto;
    const char *peer;

    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
    if (!peer) {
        return NULL;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct ofport *ofport;

        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
            return ofport_dpif_cast(ofport);
        }
    }
    return NULL;
}
static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}

static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);

    if (ofport->tnl_port
        && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
                                &ofport->tnl_port)) {
        ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
    }

    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}

static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}
static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    if (sset_contains(&ofproto->ghost_ports, devname)) {
        const char *type = netdev_get_type_from_name(devname);

        /* We may be called before ofproto->up.port_by_name is populated with
         * the appropriate ofport.  For this reason, we must get the name and
         * type from the netdev layer directly. */
        if (type) {
            const struct ofport *ofport;

            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
            ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
            ofproto_port->name = xstrdup(devname);
            ofproto_port->type = xstrdup(type);
            return 0;
        }
        return ENODEV;
    }

    if (!sset_contains(&ofproto->ports, devname)) {
        return ENODEV;
    }
    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
    }
    return error;
}
static int
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
    const char *devname = netdev_get_name(netdev);

    if (netdev_vport_is_patch(netdev)) {
        sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
        return 0;
    }

    if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        uint32_t port_no = UINT32_MAX;
        int error;

        error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
        if (error) {
            return error;
        }
        if (netdev_get_tunnel_config(netdev)) {
            simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
        }
    }

    if (netdev_get_tunnel_config(netdev)) {
        sset_add(&ofproto->ghost_ports, devname);
    } else {
        sset_add(&ofproto->ports, devname);
    }
    return 0;
}

static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    int error = 0;

    if (!ofport) {
        return 0;
    }

    sset_find_and_delete(&ofproto->ghost_ports,
                         netdev_get_name(ofport->up.netdev));
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    if (!ofport->tnl_port) {
        error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
        if (!error) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}
static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport_->ofp_port == OFPP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}

/* Account packets for LOCAL port. */
static void
ofproto_update_local_port_stats(const struct ofproto *ofproto_,
                                size_t tx_size, size_t rx_size)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (rx_size) {
        ofproto->stats.rx_packets++;
        ofproto->stats.rx_bytes += rx_size;
    }
    if (tx_size) {
        ofproto->stats.tx_packets++;
        ofproto->stats.tx_bytes += tx_size;
    }
}
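/* Illustrative example for the OFPP_LOCAL adjustment in port_get_stats():
 * a packet generated internally and sent with send_packet() increments
 * ofproto->stats.tx_packets; the code above then reports it as a received
 * packet on the local port, because from an OpenFlow point of view it looks
 * as if it had come in on OFPP_LOCAL and been forwarded. */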
struct port_dump_state {
    uint32_t bucket;
    uint32_t offset;
    bool ghost;

    struct ofproto_port port;
    bool has_port;
};

static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}

static int
port_dump_next(const struct ofproto *ofproto_, void *state_,
               struct ofproto_port *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state = state_;
    const struct sset *sset;
    struct sset_node *node;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
        state->has_port = false;
    }
    sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
    while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
        int error;

        error = port_query_by_name(ofproto_, node->name, &state->port);
        if (!error) {
            *port = state->port;
            state->has_port = true;
            return 0;
        } else if (error != ENODEV) {
            return error;
        }
    }

    if (!state->ghost) {
        state->ghost = true;
        state->bucket = 0;
        state->offset = 0;
        return port_dump_next(ofproto_, state_, port);
    }

    return EOF;
}

static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
    }
    free(state);
    return 0;
}

static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->port_poll_errno) {
        int error = ofproto->port_poll_errno;
        ofproto->port_poll_errno = 0;
        return error;
    }

    if (sset_is_empty(&ofproto->port_poll_set)) {
        return EAGAIN;
    }

    *devnamep = sset_pop(&ofproto->port_poll_set);
    return 0;
}

static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct ofproto_dpif *ofproto;

    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    ovs_be16 initial_tci;
    struct list packets;
    enum dpif_upcall_type upcall_type;
    uint32_t odp_in_port;
};

struct flow_miss_op {
    struct dpif_op dpif_op;
    void *garbage;              /* Pointer to pass to free(), NULL if none. */
    uint64_t stub[1024 / 8];    /* Temporary buffer. */
};
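/* In short: one "struct flow_miss" collects every upcall packet that parsed
 * to the same flow, so a single translation and at most one flow install can
 * cover the whole group, and FLOW_MISS_MAX_BATCH bounds how many groups are
 * handled per pass (see handle_miss_upcalls() below). */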
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}

static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
    if (!ofport) {
        return 0;
    } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return SLOW_LACP;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}

/* Partially initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.vlan_tci != miss->initial_tci) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}
/* Helper for handle_flow_miss_without_facet() and
 * handle_flow_miss_with_facet(). */
static void
handle_flow_miss_common(struct rule_dpif *rule,
                        struct ofpbuf *packet, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    ofproto->n_matches++;

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in_miss(ofproto, packet, flow);
    }
}

/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
 * installing a datapath flow.  The answer is usually "yes" (a return value of
 * true).  However, for short flows the cost of bookkeeping is much higher than
 * the benefits, so when the datapath holds a large number of flows we impose
 * some heuristics to decide which flows are likely to be worth tracking. */
static bool
flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
                            struct flow_miss *miss, uint32_t hash)
{
    if (!ofproto->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
            return true;
        }

        ofproto->governor = governor_create(ofproto->up.name);
    }

    return governor_should_install_flow(ofproto->governor, hash,
                                        list_size(&miss->packets));
}
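/* Put differently: facets are created unconditionally while the datapath
 * holds no more than half of ofproto->up.flow_eviction_threshold subfacets;
 * beyond that a governor is created and decides per flow, based on how many
 * packets it has seen, whether tracking the flow in a facet is worthwhile. */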
/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
 * or creating any datapath flow.  May add an "execute" operation to 'ops' and
 * increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct flow_miss *miss,
                               struct rule_dpif *rule,
                               struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    long long int now = time_msec();
    struct action_xlate_ctx ctx;
    struct ofpbuf *packet;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        COVERAGE_INC(facet_suppress);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);

        dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
        rule_credit_stats(rule, &stats);

        action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
                              rule, 0, packet);
        ctx.resubmit_stats = &stats;
        xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        if (odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = odp_actions.data;
            execute->actions_len = odp_actions.size;
            op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }
}
/* Handles 'miss', which matches 'facet'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op.
 *
 * All of the packets in 'miss' are considered to have arrived at time 'now'.
 * This is really important only for new facets: if we just called time_msec()
 * here, then the new subfacet or its packets could look (occasionally) as
 * though it was used some time after the facet was used.  That can make a
 * one-packet flow look like it has a nonzero duration, which looks odd in
 * e.g. NetFlow statistics. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
                            long long int now,
                            struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path want_path;
    struct subfacet *subfacet;
    struct ofpbuf *packet;

    subfacet = subfacet_create(facet, miss, now);

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        handle_flow_miss_common(facet->rule, packet, &miss->flow);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
        if (!subfacet->actions || subfacet->slow) {
            subfacet_make_actions(subfacet, packet, &odp_actions);
        }

        dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
        subfacet_update_stats(subfacet, &stats);

        if (subfacet->actions_len) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            if (!subfacet->slow) {
                execute->actions = subfacet->actions;
                execute->actions_len = subfacet->actions_len;
                ofpbuf_uninit(&odp_actions);
            } else {
                execute->actions = odp_actions.data;
                execute->actions_len = odp_actions.size;
                op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
            }

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }

    want_path = subfacet_want_path(subfacet->slow);
    if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.u.flow_put;

        subfacet->path = want_path;

        op->dpif_op.type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        if (want_path == SF_FAST_PATH) {
            put->actions = subfacet->actions;
            put->actions_len = subfacet->actions_len;
        } else {
            compose_slow_path(ofproto, &facet->flow, subfacet->slow,
                              op->stub, sizeof op->stub,
                              &put->actions, &put->actions_len);
        }
    }
}
/* Handles flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
                 size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct facet *facet;
    long long int now;
    uint32_t hash;

    /* The caller must ensure that miss->hmap_node.hash contains
     * flow_hash(miss->flow, 0). */
    hash = miss->hmap_node.hash;

    facet = facet_lookup_valid(ofproto, &miss->flow, hash);
    if (!facet) {
        struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);

        if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
            handle_flow_miss_without_facet(miss, rule, ops, n_ops);
            return;
        }

        facet = facet_create(rule, &miss->flow, hash);
        now = facet->used;
    } else {
        now = facet->used;
    }
    handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
}
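/* Summary of the dispatch above: a miss either reuses an existing facet, is
 * handled by handle_flow_miss_without_facet() (its packets are executed once
 * and no datapath flow is installed), or triggers facet_create() followed by
 * handle_flow_miss_with_facet(), which may also install the flow in the
 * datapath. */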
static struct drop_key *
drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
                size_t key_len)
{
    struct drop_key *drop_key;

    HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
                             &backer->drop_keys) {
        if (drop_key->key_len == key_len
            && !memcmp(drop_key->key, key, key_len)) {
            return drop_key;
        }
    }
    return NULL;
}

static void
drop_key_clear(struct dpif_backer *backer)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
    struct drop_key *drop_key, *next;

    HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
        int error;

        error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
                              NULL);
        if (error && !VLOG_DROP_WARN(&rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;
            odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
            VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
                      ds_cstr(&ds));
            ds_destroy(&ds);
        }

        hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
        free(drop_key->key);
        free(drop_key);
    }
}
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Optionally, if nonnull, sets '*initial_tci' to the VLAN TCI with which the
 * packet was really received, that is, the actual VLAN TCI extracted by
 * odp_flow_key_to_flow().  (This differs from the value returned in
 * flow->vlan_tci only for packets received on VLAN splinters.)
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
static int
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
                const struct nlattr *key, size_t key_len,
                struct flow *flow, enum odp_key_fitness *fitnessp,
                struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
                ovs_be16 *initial_tci)
{
    const struct ofport_dpif *port;
    enum odp_key_fitness fitness;
    int error = ENODEV;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (initial_tci) {
        *initial_tci = flow->vlan_tci;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port;
    }

    if (tnl_port_should_receive(flow)) {
        const struct ofport *ofport = tnl_port_receive(flow);
        if (!ofport) {
            flow->in_port = OFPP_NONE;
            goto exit;
        }
        port = ofport_dpif_cast(ofport);

        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;

        /* XXX: Since the tunnel module is not scoped per backer, it's
         * theoretically possible that we'll receive an ofport belonging to an
         * entirely different datapath.  In practice, this can't happen because
         * no platform has two separate datapaths which each support
         * tunneling. */
        ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
    } else {
        port = odp_port_to_ofport(backer, flow->in_port);
        if (!port) {
            flow->in_port = OFPP_NONE;
            goto exit;
        }

        flow->in_port = port->up.ofp_port;
        if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
            if (packet) {
                /* Make the packet resemble the flow, so that it gets sent to
                 * an OpenFlow controller properly, so that it looks correct
                 * for sFlow, and so that flow_extract() will get the correct
                 * vlan_tci if it is called on 'packet'.
                 *
                 * The allocated space inside 'packet' probably also contains
                 * 'key', that is, both 'packet' and 'key' are probably part of
                 * a struct dpif_upcall (see the large comment on that
                 * structure definition), so pushing data on 'packet' is in
                 * general not a good idea since it could overwrite 'key' or
                 * free it as a side effect.  However, it's OK in this special
                 * case because we know that 'packet' is inside a Netlink
                 * attribute: pushing 4 bytes will just overwrite the 4-byte
                 * "struct nlattr", which is fine since we don't need that
                 * header anymore. */
                eth_push_vlan(packet, flow->vlan_tci);
            }

            /* We can't reproduce 'key' from 'flow'. */
            fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
        }
    }
    error = 0;

    if (ofproto) {
        *ofproto = ofproto_dpif_cast(port->up.ofproto);
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    return error;
}
static void
handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss;
    struct flow_miss misses[FLOW_MISS_MAX_BATCH];
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    int n_misses;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    n_misses = 0;
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        struct flow_miss *miss = &misses[n_misses];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        uint32_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = ofproto_receive(backer, upcall->packet, upcall->key,
                                upcall->key_len, &flow, &miss->key_fitness,
                                &ofproto, &odp_in_port, &miss->initial_tci);
        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on port for which we couldn't associate
             * an ofproto.  This can happen if a port is removed while
             * traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the kernel. */
            VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
                         flow.in_port);

            drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
            if (!drop_key) {
                drop_key = xmalloc(sizeof *drop_key);
                drop_key->key = xmemdup(upcall->key, upcall->key_len);
                drop_key->key_len = upcall->key_len;

                hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
                            hash_bytes(drop_key->key, drop_key->key_len, 0));
                dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
                              drop_key->key, drop_key->key_len, NULL, 0, NULL);
            }
            continue;
        }
        if (error) {
            continue;
        }

        flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
                     &flow.tunnel, flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&todo, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = upcall->key;
            miss->key_len = upcall->key_len;
            miss->upcall_type = upcall->type;
            miss->odp_in_port = odp_in_port;
            list_init(&miss->packets);

            n_misses++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &todo) {
        handle_flow_miss(miss, flow_miss_ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(backer->dpif, dpif_ops, n_ops);

    /* Free memory. */
    for (i = 0; i < n_ops; i++) {
        free(flow_miss_ops[i].garbage);
    }
    hmap_destroy(&todo);
}
static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
    union user_action_cookie cookie;

    /* First look at the upcall type. */
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     nl_attr_get_size(upcall->userdata));
        return BAD_UPCALL;
    }
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
    switch (cookie.type) {
    case USER_ACTION_COOKIE_SFLOW:
        return SFLOW_UPCALL;

    case USER_ACTION_COOKIE_SLOW_PATH:
        return MISS_UPCALL;

    case USER_ACTION_COOKIE_UNSPEC:
    default:
        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64,
                     nl_attr_get_u64(upcall->userdata));
        return BAD_UPCALL;
    }
}
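/* Example of the classification above: a DPIF_UC_MISS upcall is always a
 * MISS_UPCALL, while a DPIF_UC_ACTION upcall whose userdata carries a
 * USER_ACTION_COOKIE_SFLOW cookie came from an sFlow sampling action and is
 * handled by handle_sflow_upcall() instead. */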
static void
handle_sflow_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;
    uint32_t odp_in_port;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, &odp_in_port, NULL)
        || !ofproto->sflow) {
        return;
    }

    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
                        odp_in_port, &cookie);
}
static int
handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
    uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
    int n_processed;
    int n_misses;
    int i;

    ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (n_processed = 0; n_processed < max_batch; n_processed++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        struct ofpbuf *buf = &miss_bufs[n_misses];
        int error;

        ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
                        sizeof miss_buf_stubs[n_misses]);
        error = dpif_recv(backer->dpif, upcall, buf);
        if (error) {
            ofpbuf_uninit(buf);
            break;
        }

        switch (classify_upcall(upcall)) {
        case MISS_UPCALL:
            /* Handle it later. */
            n_misses++;
            break;

        case SFLOW_UPCALL:
            handle_sflow_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case BAD_UPCALL:
            ofpbuf_uninit(buf);
            break;
        }
    }

    /* Handle deferred MISS_UPCALL processing. */
    handle_miss_upcalls(backer, misses, n_misses);
    for (i = 0; i < n_misses; i++) {
        ofpbuf_uninit(&miss_bufs[i]);
    }

    return n_processed;
}
/* Flow expiration. */

static int subfacet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);

/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    int max_idle = INT32_MAX;

    /* Periodically clear out the drop keys in an effort to keep them
     * relatively few. */
    drop_key_clear(backer);

    /* Update stats for each flow in the backer. */
    update_stats(backer);

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct rule *rule, *next_rule;
        int dp_max_idle;

        if (ofproto->backer != backer) {
            continue;
        }

        /* Expire subfacets that have been idle too long. */
        dp_max_idle = subfacet_max_idle(ofproto);
        expire_subfacets(ofproto, dp_max_idle);

        max_idle = MIN(max_idle, dp_max_idle);

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule));
        }

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (bundle->bond) {
                    bond_rebalance(bundle->bond, &backer->revalidate_set);
                }
            }
        }
    }

    return MIN(max_idle, 1000);
}
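/* The value returned by expire() is a hint for how soon another expiration
 * pass is worth running: the smallest per-ofproto subfacet idle cutoff,
 * capped at one second. */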
/* Updates flow table statistics given that the datapath just reported 'stats'
 * as 'subfacet''s statistics. */
static void
update_subfacet_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;

    if (stats->n_packets >= subfacet->dp_packet_count) {
        uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
        facet->packet_count += extra;
    } else {
        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
    }

    if (stats->n_bytes >= subfacet->dp_byte_count) {
        facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
    }

    subfacet->dp_packet_count = stats->n_packets;
    subfacet->dp_byte_count = stats->n_bytes;

    facet->tcp_flags |= stats->tcp_flags;

    subfacet_update_time(subfacet, stats->used);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }
    facet_push_stats(facet);
}
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
 * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
static void
delete_unexpected_flow(struct ofproto_dpif *ofproto,
                       const struct nlattr *key, size_t key_len)
{
    if (!VLOG_DROP_WARN(&rl)) {
        struct ds s;

        ds_init(&s);
        odp_flow_key_format(key, key_len, &s);
        VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    COVERAGE_INC(facet_unexpected);
    dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics. */
static void
update_stats(struct dpif_backer *backer)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, backer->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct flow flow;
        struct subfacet *subfacet;
        struct ofproto_dpif *ofproto;
        struct ofport_dpif *ofport;
        uint32_t key_hash;

        if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
                            NULL, NULL)) {
            continue;
        }

        ofport = get_ofp_port(ofproto, flow.in_port);
        if (ofport && ofport->tnl_port) {
            netdev_vport_inc_rx(ofport->up.netdev, stats);
        }

        key_hash = odp_flow_key_hash(key, key_len);
        subfacet = subfacet_find(ofproto, key, key_len, key_hash);
        switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
        case SF_FAST_PATH:
            update_subfacet_stats(subfacet, stats);
            break;

        case SF_SLOW_PATH:
            /* Stats are updated per-packet. */
            break;

        case SF_NOT_INSTALLED:
        default:
            delete_unexpected_flow(ofproto, key, key_len);
            break;
        }
    }
    dpif_flow_dump_done(&dump);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistics into its rule. */
static int
subfacet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At this point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
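    /*
     * Worked example (added for illustration; assumes BUCKET_WIDTH works out
     * to 100 ms, so N_BUCKETS is 50): with flow_eviction_threshold 1,000 and
     * 200,000 installed subfacets, the cutoff count is MAX(1000, 200000 / 100)
     * = 2,000 subfacets.  Walking from bucket 0 (most recently used) upward
     * and accumulating counts, we stop at the first bucket where the running
     * total reaches 2,000; the returned idle time is that bucket's upper
     * bound, so any subfacet idle longer than that becomes eligible for
     * expiration.
     */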
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->subfacets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }
    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
    /* Cutoff time for most flows. */
    long long int normal_cutoff = time_msec() - dp_max_idle;

    /* We really want to keep flows for special protocols around, so use a more
     * conservative cutoff. */
    long long int special_cutoff = time_msec() - 10000;

    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        long long int cutoff;

        cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
                  ? special_cutoff
                  : normal_cutoff);
        if (subfacet->used < cutoff) {
            if (subfacet->path != SF_NOT_INSTALLED) {
                batch[n_batch++] = subfacet;
                if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                    subfacet_destroy_batch(ofproto, batch, n_batch);
                    n_batch = 0;
                }
            } else {
                subfacet_destroy(subfacet);
            }
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(ofproto, batch, n_batch);
    }
}
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    if (rule->up.pending) {
        /* We'll have to expire it later. */
        return;
    }

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout
               && now > rule->up.used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
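/* Note on the arithmetic above: OpenFlow hard and idle timeouts are expressed
 * in seconds, while 'modified', 'used', and time_msec() are in milliseconds,
 * hence the multiplication by 1000.  The hard timeout is measured from the
 * time the rule was last modified; the idle timeout from the time it was last
 * used. */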
/* Creates and returns a new facet owned by 'rule', given a 'flow'.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * the ofproto's classifier table.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The facet will initially have no subfacets.  The caller should create (at
 * least) one subfacet with subfacet_create(). */
static struct facet *
facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    list_init(&facet->subfacets);
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    return facet;
}

static void
facet_free(struct facet *facet)
{
    free(facet);
}
/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
 * 'odp_actions' on 'packet', whose headers match 'flow'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct nlattr *odp_actions, size_t actions_len,
                    struct ofpbuf *packet)
{
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    int error;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow,
                           ofp_port_to_odp_port(ofproto, flow->in_port));

    error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
                         odp_actions, actions_len, packet);
    return !error;
}
/* Remove 'facet' from 'ofproto' and free up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via subfacet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet, *next_subfacet;

    ovs_assert(!list_is_empty(&facet->subfacets));

    /* First uninstall all of the subfacets to get final statistics. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet_uninstall(subfacet);
    }

    /* Flush the final stats to the rule.
     *
     * This might require us to have at least one subfacet around so that we
     * can use its actions for accounting in facet_account(), which is why we
     * have uninstalled but not yet destroyed the subfacets. */
    facet_flush_stats(facet);

    /* Now we're really all done so destroy everything. */
    LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
                        &facet->subfacets) {
        subfacet_destroy__(subfacet);
    }
    hmap_remove(&ofproto->facets, &facet->hmap_node);
    list_remove(&facet->list_node);
    facet_free(facet);
}
/* Feed information from 'facet' back into the learning table to keep it in
 * sync with what is actually flowing through the datapath. */
static void
facet_learn(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct action_xlate_ctx ctx;

    if (!facet->has_learn
        && !facet->has_normal
        && (!facet->has_fin_timeout
            || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
        return;
    }

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                          facet->flow.vlan_tci,
                          facet->rule, facet->tcp_flags, NULL);
    ctx.may_learn = true;
    xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
                                   facet->rule->up.ofpacts_len);
}
static void
facet_account(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;
    const struct nlattr *a;
    unsigned int left;
    ovs_be16 vlan_tci;
    uint64_t n_bytes;

    if (!facet->has_normal || !ofproto->has_bonded_bundles) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bond.)
     *
     * We use the actions from an arbitrary subfacet because they should all
     * be equally valid for our purpose. */
    subfacet = CONTAINER_OF(list_front(&facet->subfacets),
                            struct subfacet, list_node);
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left,
                             subfacet->actions, subfacet->actions_len) {
        const struct ovs_action_push_vlan *vlan;
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP_VLAN:
            vlan_tci = htons(0);
            break;

        case OVS_ACTION_ATTR_PUSH_VLAN:
            vlan = nl_attr_get(a);
            vlan_tci = vlan->vlan_tci;
            break;
        }
    }
}
/* Returns true if the only action for 'facet' is to send to the controller.
 * (We don't report NetFlow expiration messages for such facets because they
 * are just part of the control logic for the network, not real traffic). */
static bool
facet_is_controller_flow(struct facet *facet)
{
    if (facet) {
        const struct rule *rule = &facet->rule->up;
        const struct ofpact *ofpacts = rule->ofpacts;
        size_t ofpacts_len = rule->ofpacts_len;

        if (ofpacts_len > 0 &&
            ofpacts->type == OFPACT_CONTROLLER &&
            ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
            return true;
        }
    }
    return false;
}
/* Folds all of 'facet''s statistics into its rule.  Also updates the
 * accounting ofhook and emits a NetFlow expiration if appropriate.  All of
 * 'facet''s statistics in the datapath should have been zeroed and folded into
 * its packet and byte counts before this function is called. */
static void
facet_flush_stats(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        ovs_assert(!subfacet->dp_byte_count);
        ovs_assert(!subfacet->dp_packet_count);
    }

    facet_push_stats(facet);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }

    if (ofproto->netflow && !facet_is_controller_flow(facet)) {
        struct ofexpired expired;
        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }

    facet->rule->packet_count += facet->packet_count;
    facet->rule->byte_count += facet->byte_count;

    /* Reset counters to prevent double counting if 'facet' ever gets
     * reinstalled. */
    facet_reset_counters(facet);

    netflow_flow_clear(&facet->nf_flow);
    facet->tcp_flags = 0;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto,
           const struct flow *flow, uint32_t hash)
{
    struct facet *facet;

    HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
        if (flow_equal(flow, &facet->flow)) {
            return facet;
        }
    }

    return NULL;
}

/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint32_t hash)
{
    struct facet *facet;

    facet = facet_find(ofproto, flow, hash);
    if (facet
        && (ofproto->backer->need_revalidate
            || tag_set_intersects(&ofproto->backer->revalidate_set,
                                  facet->tags))) {
        facet_revalidate(facet);

        /* facet_revalidate() may have destroyed 'facet'. */
        facet = facet_find(ofproto, flow, hash);
    }

    return facet;
}
static const char *
subfacet_path_to_string(enum subfacet_path path)
{
    switch (path) {
    case SF_NOT_INSTALLED:
        return "not installed";
    case SF_FAST_PATH:
        return "in fast path";
    case SF_SLOW_PATH:
        return "in slow path";
    default:
        return "<error>";
    }
}

/* Returns the path in which a subfacet should be installed if its 'slow'
 * member has the specified value. */
static enum subfacet_path
subfacet_want_path(enum slow_path_reason slow)
{
    return slow ? SF_SLOW_PATH : SF_FAST_PATH;
}

/* Returns true if 'subfacet' needs to have its datapath flow updated,
 * supposing that its actions have been recalculated as 'want_actions' and that
 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
static bool
subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
                        const struct ofpbuf *want_actions)
{
    enum subfacet_path want_path = subfacet_want_path(slow);
    return (want_path != subfacet->path
            || (want_path == SF_FAST_PATH
                && (subfacet->actions_len != want_actions->size
                    || memcmp(subfacet->actions, want_actions->data,
                              subfacet->actions_len))));
}
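/* In other words (descriptive note added for clarity): a subfacet must be
 * (re)installed whenever the path it should be on changes, or when it should
 * stay in the fast path but its cached datapath actions no longer match the
 * freshly translated 'want_actions' byte-for-byte.  A subfacet that stays in
 * the slow path is never compared on actions, since slow-path actions may
 * legitimately vary from one packet to the next. */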
4616 facet_check_consistency(struct facet
*facet
)
4618 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 15);
4620 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4622 uint64_t odp_actions_stub
[1024 / 8];
4623 struct ofpbuf odp_actions
;
4625 struct rule_dpif
*rule
;
4626 struct subfacet
*subfacet
;
4627 bool may_log
= false;
4630 /* Check the rule for consistency. */
4631 rule
= rule_dpif_lookup(ofproto
, &facet
->flow
);
4632 ok
= rule
== facet
->rule
;
4634 may_log
= !VLOG_DROP_WARN(&rl
);
4639 flow_format(&s
, &facet
->flow
);
4640 ds_put_format(&s
, ": facet associated with wrong rule (was "
4641 "table=%"PRIu8
",", facet
->rule
->up
.table_id
);
4642 cls_rule_format(&facet
->rule
->up
.cr
, &s
);
4643 ds_put_format(&s
, ") (should have been table=%"PRIu8
",",
4645 cls_rule_format(&rule
->up
.cr
, &s
);
4646 ds_put_char(&s
, ')');
4648 VLOG_WARN("%s", ds_cstr(&s
));
4653 /* Check the datapath actions for consistency. */
4654 ofpbuf_use_stub(&odp_actions
, odp_actions_stub
, sizeof odp_actions_stub
);
4655 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4656 enum subfacet_path want_path
;
4657 struct action_xlate_ctx ctx
;
4660 action_xlate_ctx_init(&ctx
, ofproto
, &facet
->flow
,
4661 subfacet
->initial_tci
, rule
, 0, NULL
);
4662 xlate_actions(&ctx
, rule
->up
.ofpacts
, rule
->up
.ofpacts_len
,
4665 if (subfacet
->path
== SF_NOT_INSTALLED
) {
4666 /* This only happens if the datapath reported an error when we
4667 * tried to install the flow. Don't flag another error here. */
4671 want_path
= subfacet_want_path(subfacet
->slow
);
4672 if (want_path
== SF_SLOW_PATH
&& subfacet
->path
== SF_SLOW_PATH
) {
4673 /* The actions for slow-path flows may legitimately vary from one
4674 * packet to the next. We're done. */
4678 if (!subfacet_should_install(subfacet
, subfacet
->slow
, &odp_actions
)) {
4682 /* Inconsistency! */
4684 may_log
= !VLOG_DROP_WARN(&rl
);
4688 /* Rate-limited, skip reporting. */
4693 odp_flow_key_format(subfacet
->key
, subfacet
->key_len
, &s
);
4695 ds_put_cstr(&s
, ": inconsistency in subfacet");
4696 if (want_path
!= subfacet
->path
) {
4697 enum odp_key_fitness fitness
= subfacet
->key_fitness
;
4699 ds_put_format(&s
, " (%s, fitness=%s)",
4700 subfacet_path_to_string(subfacet
->path
),
4701 odp_key_fitness_to_string(fitness
));
4702 ds_put_format(&s
, " (should have been %s)",
4703 subfacet_path_to_string(want_path
));
4704 } else if (want_path
== SF_FAST_PATH
) {
4705 ds_put_cstr(&s
, " (actions were: ");
4706 format_odp_actions(&s
, subfacet
->actions
,
4707 subfacet
->actions_len
);
4708 ds_put_cstr(&s
, ") (correct actions: ");
4709 format_odp_actions(&s
, odp_actions
.data
, odp_actions
.size
);
4710 ds_put_char(&s
, ')');
4712 ds_put_cstr(&s
, " (actions: ");
4713 format_odp_actions(&s
, subfacet
->actions
,
4714 subfacet
->actions_len
);
4715 ds_put_char(&s
, ')');
4717 VLOG_WARN("%s", ds_cstr(&s
));
4720 ofpbuf_uninit(&odp_actions
);
4725 /* Re-searches the classifier for 'facet':
4727 * - If the rule found is different from 'facet''s current rule, moves
4728 * 'facet' to the new rule and recompiles its actions.
4730 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
4731 * where it is and recompiles its actions anyway.
4733 * - If any of 'facet''s subfacets correspond to a new flow according to
4734 * ofproto_receive(), 'facet' is removed. */
4736 facet_revalidate(struct facet
*facet
)
4738 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4740 struct nlattr
*odp_actions
;
4743 struct actions
*new_actions
;
4745 struct action_xlate_ctx ctx
;
4746 uint64_t odp_actions_stub
[1024 / 8];
4747 struct ofpbuf odp_actions
;
4749 struct rule_dpif
*new_rule
;
4750 struct subfacet
*subfacet
;
4753 COVERAGE_INC(facet_revalidate
);
4755 /* Check that child subfacets still correspond to this facet. Tunnel
4756 * configuration changes could cause a subfacet's OpenFlow in_port to
4758 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4759 struct ofproto_dpif
*recv_ofproto
;
4760 struct flow recv_flow
;
4763 error
= ofproto_receive(ofproto
->backer
, NULL
, subfacet
->key
,
4764 subfacet
->key_len
, &recv_flow
, NULL
,
4765 &recv_ofproto
, NULL
, NULL
);
4767 || recv_ofproto
!= ofproto
4768 || memcmp(&recv_flow
, &facet
->flow
, sizeof recv_flow
)) {
4769 facet_remove(facet
);
4774 new_rule
= rule_dpif_lookup(ofproto
, &facet
->flow
);
4776 /* Calculate new datapath actions.
4778 * We do not modify any 'facet' state yet, because we might need to, e.g.,
4779 * emit a NetFlow expiration and, if so, we need to have the old state
4780 * around to properly compose it. */
4782 /* If the datapath actions changed or the installability changed,
4783 * then we need to talk to the datapath. */
4786 memset(&ctx
, 0, sizeof ctx
);
4787 ofpbuf_use_stub(&odp_actions
, odp_actions_stub
, sizeof odp_actions_stub
);
4788 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4789 enum slow_path_reason slow
;
4791 action_xlate_ctx_init(&ctx
, ofproto
, &facet
->flow
,
4792 subfacet
->initial_tci
, new_rule
, 0, NULL
);
4793 xlate_actions(&ctx
, new_rule
->up
.ofpacts
, new_rule
->up
.ofpacts_len
,
4796 slow
= (subfacet
->slow
& SLOW_MATCH
) | ctx
.slow
;
4797 if (subfacet_should_install(subfacet
, slow
, &odp_actions
)) {
4798 struct dpif_flow_stats stats
;
4800 subfacet_install(subfacet
,
4801 odp_actions
.data
, odp_actions
.size
, &stats
, slow
);
4802 subfacet_update_stats(subfacet
, &stats
);
4805 new_actions
= xcalloc(list_size(&facet
->subfacets
),
4806 sizeof *new_actions
);
4808 new_actions
[i
].odp_actions
= xmemdup(odp_actions
.data
,
4810 new_actions
[i
].actions_len
= odp_actions
.size
;
4815 ofpbuf_uninit(&odp_actions
);
4818 facet_flush_stats(facet
);
4821 /* Update 'facet' now that we've taken care of all the old state. */
4822 facet
->tags
= ctx
.tags
;
4823 facet
->nf_flow
.output_iface
= ctx
.nf_output_iface
;
4824 facet
->has_learn
= ctx
.has_learn
;
4825 facet
->has_normal
= ctx
.has_normal
;
4826 facet
->has_fin_timeout
= ctx
.has_fin_timeout
;
4827 facet
->mirrors
= ctx
.mirrors
;
4830 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4831 subfacet
->slow
= (subfacet
->slow
& SLOW_MATCH
) | ctx
.slow
;
4833 if (new_actions
&& new_actions
[i
].odp_actions
) {
4834 free(subfacet
->actions
);
4835 subfacet
->actions
= new_actions
[i
].odp_actions
;
4836 subfacet
->actions_len
= new_actions
[i
].actions_len
;
4842 if (facet
->rule
!= new_rule
) {
4843 COVERAGE_INC(facet_changed_rule
);
4844 list_remove(&facet
->list_node
);
4845 list_push_back(&new_rule
->facets
, &facet
->list_node
);
4846 facet
->rule
= new_rule
;
4847 facet
->used
= new_rule
->up
.created
;
4848 facet
->prev_used
= facet
->used
;
/* Updates 'facet''s used time.  Caller is responsible for calling
 * facet_push_stats() to update the flows which 'facet' resubmits into. */
static void
facet_update_time(struct facet *facet, long long int used)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    if (used > facet->used) {
        facet->used = used;
        ofproto_rule_update_used(&facet->rule->up, used);
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
    }
}

static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}
static void
facet_push_stats(struct facet *facet)
{
    struct dpif_flow_stats stats;

    ovs_assert(facet->packet_count >= facet->prev_packet_count);
    ovs_assert(facet->byte_count >= facet->prev_byte_count);
    ovs_assert(facet->used >= facet->prev_used);

    stats.n_packets = facet->packet_count - facet->prev_packet_count;
    stats.n_bytes = facet->byte_count - facet->prev_byte_count;
    stats.used = facet->used;
    stats.tcp_flags = 0;

    if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        flow_push_stats(facet->rule, &facet->flow, &stats);

        update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
                            facet->mirrors, stats.n_packets, stats.n_bytes);
    }
}
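/* Descriptive note: facet_push_stats() forwards only the delta accumulated
 * since the previous push.  The prev_* members record what has already been
 * credited to the resubmitted-into rules and mirrors, so calling this
 * function repeatedly never double-counts packets or bytes. */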
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
    rule->packet_count += stats->n_packets;
    rule->byte_count += stats->n_bytes;
    ofproto_rule_update_used(&rule->up, stats->used);
}

/* Pushes flow statistics to the rules which 'flow' resubmits into given
 * 'rule''s actions and mirrors. */
static void
flow_push_stats(struct rule_dpif *rule,
                const struct flow *flow, const struct dpif_flow_stats *stats)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;

    ofproto_rule_update_used(&rule->up, stats->used);

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
                          0, NULL);
    ctx.resubmit_stats = stats;
    xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
                                   rule->up.ofpacts_len);
}
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
              const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &ofproto->subfacets) {
        if (subfacet->key_len == key_len
            && !memcmp(key, subfacet->key, key_len)) {
            return subfacet;
        }
    }

    return NULL;
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
 * 'key_fitness', 'key', and 'key_len' members in 'miss'.  Returns the
 * existing subfacet if there is one, otherwise creates and returns a
 * new subfacet.
 *
 * If the returned subfacet is new, then subfacet->actions will be NULL, in
 * which case the caller must populate the actions with
 * subfacet_make_actions(). */
static struct subfacet *
subfacet_create(struct facet *facet, struct flow_miss *miss,
                long long int now)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum odp_key_fitness key_fitness = miss->key_fitness;
    const struct nlattr *key = miss->key;
    size_t key_len = miss->key_len;
    uint32_t key_hash;
    struct subfacet *subfacet;

    key_hash = odp_flow_key_hash(key, key_len);

    if (list_is_empty(&facet->subfacets)) {
        subfacet = &facet->one_subfacet;
    } else {
        subfacet = subfacet_find(ofproto, key, key_len, key_hash);
        if (subfacet) {
            if (subfacet->facet == facet) {
                return subfacet;
            }

            /* This shouldn't happen. */
            VLOG_ERR_RL(&rl, "subfacet with wrong facet");
            subfacet_destroy(subfacet);
        }

        subfacet = xmalloc(sizeof *subfacet);
    }

    hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->key_fitness = key_fitness;
    subfacet->key = xmemdup(key, key_len);
    subfacet->key_len = key_len;
    subfacet->used = now;
    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
    subfacet->actions_len = 0;
    subfacet->actions = NULL;
    subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
                      ? SLOW_MATCH
                      : 0);
    subfacet->path = SF_NOT_INSTALLED;
    subfacet->initial_tci = miss->initial_tci;
    subfacet->odp_in_port = miss->odp_in_port;
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    subfacet_uninstall(subfacet);
    hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    free(subfacet->actions);
    if (subfacet != &facet->one_subfacet) {
        free(subfacet);
    }
}

/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(facet);
    } else {
        subfacet_destroy__(subfacet);
    }
}
static void
subfacet_destroy_batch(struct ofproto_dpif *ofproto,
                       struct subfacet **subfacets, int n)
{
    struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
    int i;

    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        ops[i].u.flow_del.key = subfacets[i]->key;
        ops[i].u.flow_del.key_len = subfacets[i]->key_len;
        ops[i].u.flow_del.stats = &stats[i];
        opsp[i] = &ops[i];
    }

    dpif_operate(ofproto->backer->dpif, opsp, n);
    for (i = 0; i < n; i++) {
        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
        subfacets[i]->path = SF_NOT_INSTALLED;
        subfacet_destroy(subfacets[i]);
    }
}
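/* Descriptive note: deleting datapath flows in batches of up to
 * SUBFACET_DESTROY_MAX_BATCH amortizes the cost of each dpif_operate() call.
 * Marking the path SF_NOT_INSTALLED before subfacet_destroy() keeps the later
 * subfacet_uninstall() from issuing a second, per-flow delete. */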
5063 /* Composes the datapath actions for 'subfacet' based on its rule's actions.
5064 * Translates the actions into 'odp_actions', which the caller must have
5065 * initialized and is responsible for uninitializing. */
5067 subfacet_make_actions(struct subfacet
*subfacet
, const struct ofpbuf
*packet
,
5068 struct ofpbuf
*odp_actions
)
5070 struct facet
*facet
= subfacet
->facet
;
5071 struct rule_dpif
*rule
= facet
->rule
;
5072 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5074 struct action_xlate_ctx ctx
;
5076 action_xlate_ctx_init(&ctx
, ofproto
, &facet
->flow
, subfacet
->initial_tci
,
5078 xlate_actions(&ctx
, rule
->up
.ofpacts
, rule
->up
.ofpacts_len
, odp_actions
);
5079 facet
->tags
= ctx
.tags
;
5080 facet
->has_learn
= ctx
.has_learn
;
5081 facet
->has_normal
= ctx
.has_normal
;
5082 facet
->has_fin_timeout
= ctx
.has_fin_timeout
;
5083 facet
->nf_flow
.output_iface
= ctx
.nf_output_iface
;
5084 facet
->mirrors
= ctx
.mirrors
;
5086 subfacet
->slow
= (subfacet
->slow
& SLOW_MATCH
) | ctx
.slow
;
5087 if (subfacet
->actions_len
!= odp_actions
->size
5088 || memcmp(subfacet
->actions
, odp_actions
->data
, odp_actions
->size
)) {
5089 free(subfacet
->actions
);
5090 subfacet
->actions_len
= odp_actions
->size
;
5091 subfacet
->actions
= xmemdup(odp_actions
->data
, odp_actions
->size
);
5095 /* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
5096 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
5097 * in the datapath will be zeroed and 'stats' will be updated with traffic new
5098 * since 'subfacet' was last updated.
5100 * Returns 0 if successful, otherwise a positive errno value. */
5102 subfacet_install(struct subfacet
*subfacet
,
5103 const struct nlattr
*actions
, size_t actions_len
,
5104 struct dpif_flow_stats
*stats
,
5105 enum slow_path_reason slow
)
5107 struct facet
*facet
= subfacet
->facet
;
5108 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5109 enum subfacet_path path
= subfacet_want_path(slow
);
5110 uint64_t slow_path_stub
[128 / 8];
5111 enum dpif_flow_put_flags flags
;
5114 flags
= DPIF_FP_CREATE
| DPIF_FP_MODIFY
;
5116 flags
|= DPIF_FP_ZERO_STATS
;
5119 if (path
== SF_SLOW_PATH
) {
5120 compose_slow_path(ofproto
, &facet
->flow
, slow
,
5121 slow_path_stub
, sizeof slow_path_stub
,
5122 &actions
, &actions_len
);
5125 ret
= dpif_flow_put(ofproto
->backer
->dpif
, flags
, subfacet
->key
,
5126 subfacet
->key_len
, actions
, actions_len
, stats
);
5129 subfacet_reset_dp_stats(subfacet
, stats
);
5133 subfacet
->path
= path
;
5139 subfacet_reinstall(struct subfacet
*subfacet
, struct dpif_flow_stats
*stats
)
5141 return subfacet_install(subfacet
, subfacet
->actions
, subfacet
->actions_len
,
5142 stats
, subfacet
->slow
);
/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
{
    if (subfacet->path != SF_NOT_INSTALLED) {
        struct rule_dpif *rule = subfacet->facet->rule;
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
        struct dpif_flow_stats stats;
        int error;

        error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
                              subfacet->key_len, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(subfacet, &stats);
        }
        subfacet->path = SF_NOT_INSTALLED;
    } else {
        ovs_assert(subfacet->dp_packet_count == 0);
        ovs_assert(subfacet->dp_byte_count == 0);
    }
}
/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}

/* Updates 'subfacet''s used time.  The caller is responsible for calling
 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
static void
subfacet_update_time(struct subfacet *subfacet, long long int used)
{
    if (used > subfacet->used) {
        subfacet->used = used;
        facet_update_time(subfacet->facet, used);
    }
}
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet_update_time(subfacet, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet->tcp_flags |= stats->tcp_flags;
        facet_push_stats(facet);
        netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
    }
}
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct rule_dpif *rule;

    rule = rule_dpif_lookup__(ofproto, flow, 0);
    if (rule) {
        return rule;
    }

    return rule_dpif_miss_rule(ofproto, flow);
}

static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id].cls;
    if (flow->nw_frag & FLOW_NW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
         * are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }

    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
static struct rule_dpif *
rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct ofport_dpif *port;

    port = get_ofp_port(ofproto, flow->in_port);
    if (!port) {
        VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
        return ofproto->miss_rule;
    }

    if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
        return ofproto->no_packet_in_rule;
    }
    return ofproto->miss_rule;
}
5280 complete_operation(struct rule_dpif
*rule
)
5282 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5284 rule_invalidate(rule
);
5286 struct dpif_completion
*c
= xmalloc(sizeof *c
);
5287 c
->op
= rule
->up
.pending
;
5288 list_push_back(&ofproto
->completions
, &c
->list_node
);
5290 ofoperation_complete(rule
->up
.pending
, 0);
5294 static struct rule
*
5297 struct rule_dpif
*rule
= xmalloc(sizeof *rule
);
5302 rule_dealloc(struct rule
*rule_
)
5304 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5309 rule_construct(struct rule
*rule_
)
5311 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5312 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5313 struct rule_dpif
*victim
;
5316 rule
->packet_count
= 0;
5317 rule
->byte_count
= 0;
5319 victim
= rule_dpif_cast(ofoperation_get_victim(rule
->up
.pending
));
5320 if (victim
&& !list_is_empty(&victim
->facets
)) {
5321 struct facet
*facet
;
5323 rule
->facets
= victim
->facets
;
5324 list_moved(&rule
->facets
);
5325 LIST_FOR_EACH (facet
, list_node
, &rule
->facets
) {
5326 /* XXX: We're only clearing our local counters here. It's possible
5327 * that quite a few packets are unaccounted for in the datapath
5328 * statistics. These will be accounted to the new rule instead of
5329 * cleared as required. This could be fixed by clearing out the
5330 * datapath statistics for this facet, but currently it doesn't
5332 facet_reset_counters(facet
);
5336 /* Must avoid list_moved() in this case. */
5337 list_init(&rule
->facets
);
5340 table_id
= rule
->up
.table_id
;
5342 rule
->tag
= victim
->tag
;
5343 } else if (table_id
== 0) {
5348 miniflow_expand(&rule
->up
.cr
.match
.flow
, &flow
);
5349 rule
->tag
= rule_calculate_tag(&flow
, &rule
->up
.cr
.match
.mask
,
5350 ofproto
->tables
[table_id
].basis
);
5353 complete_operation(rule
);
5358 rule_destruct(struct rule
*rule_
)
5360 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5361 struct facet
*facet
, *next_facet
;
5363 LIST_FOR_EACH_SAFE (facet
, next_facet
, list_node
, &rule
->facets
) {
5364 facet_revalidate(facet
);
5367 complete_operation(rule
);
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;

    /* Add any statistics that are tracked by facets.  This includes
     * statistical data recently updated by ofproto_update_stats() as well as
     * stats for packets that were executed "by hand" via dpif_execute(). */
    LIST_FOR_EACH (facet, list_node, &rule->facets) {
        *packets += facet->packet_count;
        *bytes += facet->byte_count;
    }
}
5391 rule_dpif_execute(struct rule_dpif
*rule
, const struct flow
*flow
,
5392 struct ofpbuf
*packet
)
5394 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5396 struct dpif_flow_stats stats
;
5398 struct action_xlate_ctx ctx
;
5399 uint64_t odp_actions_stub
[1024 / 8];
5400 struct ofpbuf odp_actions
;
5402 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
5403 rule_credit_stats(rule
, &stats
);
5405 ofpbuf_use_stub(&odp_actions
, odp_actions_stub
, sizeof odp_actions_stub
);
5406 action_xlate_ctx_init(&ctx
, ofproto
, flow
, flow
->vlan_tci
,
5407 rule
, stats
.tcp_flags
, packet
);
5408 ctx
.resubmit_stats
= &stats
;
5409 xlate_actions(&ctx
, rule
->up
.ofpacts
, rule
->up
.ofpacts_len
, &odp_actions
);
5411 execute_odp_actions(ofproto
, flow
, odp_actions
.data
,
5412 odp_actions
.size
, packet
);
5414 ofpbuf_uninit(&odp_actions
);
5418 rule_execute(struct rule
*rule
, const struct flow
*flow
,
5419 struct ofpbuf
*packet
)
5421 rule_dpif_execute(rule_dpif_cast(rule
), flow
, packet
);
5422 ofpbuf_delete(packet
);
5427 rule_modify_actions(struct rule
*rule_
)
5429 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5431 complete_operation(rule
);
5434 /* Sends 'packet' out 'ofport'.
5435 * May modify 'packet'.
5436 * Returns 0 if successful, otherwise a positive errno value. */
5438 send_packet(const struct ofport_dpif
*ofport
, struct ofpbuf
*packet
)
5440 const struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
5441 uint64_t odp_actions_stub
[1024 / 8];
5442 struct ofpbuf key
, odp_actions
;
5443 struct odputil_keybuf keybuf
;
5448 flow_extract(packet
, 0, 0, NULL
, OFPP_LOCAL
, &flow
);
5449 if (netdev_vport_is_patch(ofport
->up
.netdev
)) {
5450 struct ofproto_dpif
*peer_ofproto
;
5451 struct dpif_flow_stats stats
;
5452 struct ofport_dpif
*peer
;
5453 struct rule_dpif
*rule
;
5455 peer
= ofport_get_peer(ofport
);
5460 dpif_flow_stats_extract(&flow
, packet
, time_msec(), &stats
);
5461 netdev_vport_inc_tx(ofport
->up
.netdev
, &stats
);
5462 netdev_vport_inc_rx(peer
->up
.netdev
, &stats
);
5464 flow
.in_port
= peer
->up
.ofp_port
;
5465 peer_ofproto
= ofproto_dpif_cast(peer
->up
.ofproto
);
5466 rule
= rule_dpif_lookup(peer_ofproto
, &flow
);
5467 rule_dpif_execute(rule
, &flow
, packet
);
5472 ofpbuf_use_stub(&odp_actions
, odp_actions_stub
, sizeof odp_actions_stub
);
5474 if (ofport
->tnl_port
) {
5475 struct dpif_flow_stats stats
;
5477 odp_port
= tnl_port_send(ofport
->tnl_port
, &flow
);
5478 if (odp_port
== OVSP_NONE
) {
5482 dpif_flow_stats_extract(&flow
, packet
, time_msec(), &stats
);
5483 netdev_vport_inc_tx(ofport
->up
.netdev
, &stats
);
5484 odp_put_tunnel_action(&flow
.tunnel
, &odp_actions
);
5485 odp_put_skb_mark_action(flow
.skb_mark
, &odp_actions
);
5487 odp_port
= vsp_realdev_to_vlandev(ofproto
, ofport
->odp_port
,
5489 if (odp_port
!= ofport
->odp_port
) {
5490 eth_pop_vlan(packet
);
5491 flow
.vlan_tci
= htons(0);
5495 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
5496 odp_flow_key_from_flow(&key
, &flow
,
5497 ofp_port_to_odp_port(ofproto
, flow
.in_port
));
5499 compose_sflow_action(ofproto
, &odp_actions
, &flow
, odp_port
);
5501 nl_msg_put_u32(&odp_actions
, OVS_ACTION_ATTR_OUTPUT
, odp_port
);
5502 error
= dpif_execute(ofproto
->backer
->dpif
,
5504 odp_actions
.data
, odp_actions
.size
,
5506 ofpbuf_uninit(&odp_actions
);
5509 VLOG_WARN_RL(&rl
, "%s: failed to send packet on port %"PRIu32
" (%s)",
5510 ofproto
->up
.name
, odp_port
, strerror(error
));
5512 ofproto_update_local_port_stats(ofport
->up
.ofproto
, packet
->size
, 0);
5516 /* OpenFlow to datapath action translation. */
5518 static bool may_receive(const struct ofport_dpif
*, struct action_xlate_ctx
*);
5519 static void do_xlate_actions(const struct ofpact
*, size_t ofpacts_len
,
5520 struct action_xlate_ctx
*);
5521 static void xlate_normal(struct action_xlate_ctx
*);
5523 /* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
5524 * The action will state 'slow' as the reason that the action is in the slow
5525 * path. (This is purely informational: it allows a human viewing "ovs-dpctl
5526 * dump-flows" output to see why a flow is in the slow path.)
5528 * The 'stub_size' bytes in 'stub' will be used to store the action.
5529 * 'stub_size' must be large enough for the action.
5531 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
5534 compose_slow_path(const struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
5535 enum slow_path_reason slow
,
5536 uint64_t *stub
, size_t stub_size
,
5537 const struct nlattr
**actionsp
, size_t *actions_lenp
)
5539 union user_action_cookie cookie
;
5542 cookie
.type
= USER_ACTION_COOKIE_SLOW_PATH
;
5543 cookie
.slow_path
.unused
= 0;
5544 cookie
.slow_path
.reason
= slow
;
5546 ofpbuf_use_stack(&buf
, stub
, stub_size
);
5547 if (slow
& (SLOW_CFM
| SLOW_LACP
| SLOW_STP
)) {
5548 uint32_t pid
= dpif_port_get_pid(ofproto
->backer
->dpif
, UINT32_MAX
);
5549 odp_put_userspace_action(pid
, &cookie
, sizeof cookie
, &buf
);
5551 put_userspace_action(ofproto
, &buf
, flow
, &cookie
);
5553 *actionsp
= buf
.data
;
5554 *actions_lenp
= buf
.size
;
5558 put_userspace_action(const struct ofproto_dpif
*ofproto
,
5559 struct ofpbuf
*odp_actions
,
5560 const struct flow
*flow
,
5561 const union user_action_cookie
*cookie
)
5565 pid
= dpif_port_get_pid(ofproto
->backer
->dpif
,
5566 ofp_port_to_odp_port(ofproto
, flow
->in_port
));
5568 return odp_put_userspace_action(pid
, cookie
, sizeof *cookie
, odp_actions
);
5572 compose_sflow_cookie(const struct ofproto_dpif
*ofproto
,
5573 ovs_be16 vlan_tci
, uint32_t odp_port
,
5574 unsigned int n_outputs
, union user_action_cookie
*cookie
)
5578 cookie
->type
= USER_ACTION_COOKIE_SFLOW
;
5579 cookie
->sflow
.vlan_tci
= vlan_tci
;
5581 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
5582 * port information") for the interpretation of cookie->output. */
5583 switch (n_outputs
) {
5585 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
5586 cookie
->sflow
.output
= 0x40000000 | 256;
5590 ifindex
= dpif_sflow_odp_port_to_ifindex(ofproto
->sflow
, odp_port
);
5592 cookie
->sflow
.output
= ifindex
;
5597 /* 0x80000000 means "multiple output ports. */
5598 cookie
->sflow
.output
= 0x80000000 | n_outputs
;
5603 /* Compose SAMPLE action for sFlow. */
5605 compose_sflow_action(const struct ofproto_dpif
*ofproto
,
5606 struct ofpbuf
*odp_actions
,
5607 const struct flow
*flow
,
5610 uint32_t probability
;
5611 union user_action_cookie cookie
;
5612 size_t sample_offset
, actions_offset
;
5615 if (!ofproto
->sflow
|| flow
->in_port
== OFPP_NONE
) {
5619 sample_offset
= nl_msg_start_nested(odp_actions
, OVS_ACTION_ATTR_SAMPLE
);
5621 /* Number of packets out of UINT_MAX to sample. */
5622 probability
= dpif_sflow_get_probability(ofproto
->sflow
);
5623 nl_msg_put_u32(odp_actions
, OVS_SAMPLE_ATTR_PROBABILITY
, probability
);
5625 actions_offset
= nl_msg_start_nested(odp_actions
, OVS_SAMPLE_ATTR_ACTIONS
);
5626 compose_sflow_cookie(ofproto
, htons(0), odp_port
,
5627 odp_port
== OVSP_NONE
? 0 : 1, &cookie
);
5628 cookie_offset
= put_userspace_action(ofproto
, odp_actions
, flow
, &cookie
);
5630 nl_msg_end_nested(odp_actions
, actions_offset
);
5631 nl_msg_end_nested(odp_actions
, sample_offset
);
5632 return cookie_offset
;
5635 /* SAMPLE action must be first action in any given list of actions.
5636 * At this point we do not have all information required to build it. So try to
5637 * build sample action as complete as possible. */
5639 add_sflow_action(struct action_xlate_ctx
*ctx
)
5641 ctx
->user_cookie_offset
= compose_sflow_action(ctx
->ofproto
,
5643 &ctx
->flow
, OVSP_NONE
);
5644 ctx
->sflow_odp_port
= 0;
5645 ctx
->sflow_n_outputs
= 0;
5648 /* Fix SAMPLE action according to data collected while composing ODP actions.
5649 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
5650 * USERSPACE action's user-cookie which is required for sflow. */
5652 fix_sflow_action(struct action_xlate_ctx
*ctx
)
5654 const struct flow
*base
= &ctx
->base_flow
;
5655 union user_action_cookie
*cookie
;
5657 if (!ctx
->user_cookie_offset
) {
5661 cookie
= ofpbuf_at(ctx
->odp_actions
, ctx
->user_cookie_offset
,
5663 ovs_assert(cookie
->type
== USER_ACTION_COOKIE_SFLOW
);
5665 compose_sflow_cookie(ctx
->ofproto
, base
->vlan_tci
,
5666 ctx
->sflow_odp_port
, ctx
->sflow_n_outputs
, cookie
);
5670 compose_output_action__(struct action_xlate_ctx
*ctx
, uint16_t ofp_port
,
5673 const struct ofport_dpif
*ofport
= get_ofp_port(ctx
->ofproto
, ofp_port
);
5674 ovs_be16 flow_vlan_tci
= ctx
->flow
.vlan_tci
;
5675 ovs_be64 flow_tun_id
= ctx
->flow
.tunnel
.tun_id
;
5676 uint8_t flow_nw_tos
= ctx
->flow
.nw_tos
;
5677 struct priority_to_dscp
*pdscp
;
5678 uint32_t out_port
, odp_port
;
5680 /* If 'struct flow' gets additional metadata, we'll need to zero it out
5681 * before traversing a patch port. */
5682 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 19);
5685 xlate_report(ctx
, "Nonexistent output port");
5687 } else if (ofport
->up
.pp
.config
& OFPUTIL_PC_NO_FWD
) {
5688 xlate_report(ctx
, "OFPPC_NO_FWD set, skipping output");
5690 } else if (check_stp
&& !stp_forward_in_state(ofport
->stp_state
)) {
5691 xlate_report(ctx
, "STP not in forwarding state, skipping output");
5695 if (netdev_vport_is_patch(ofport
->up
.netdev
)) {
5696 struct ofport_dpif
*peer
= ofport_get_peer(ofport
);
5697 struct flow old_flow
= ctx
->flow
;
5698 const struct ofproto_dpif
*peer_ofproto
;
5699 enum slow_path_reason special
;
5700 struct ofport_dpif
*in_port
;
5703 xlate_report(ctx
, "Nonexistent patch port peer");
5707 peer_ofproto
= ofproto_dpif_cast(peer
->up
.ofproto
);
5708 if (peer_ofproto
->backer
!= ctx
->ofproto
->backer
) {
5709 xlate_report(ctx
, "Patch port peer on a different datapath");
5713 ctx
->ofproto
= ofproto_dpif_cast(peer
->up
.ofproto
);
5714 ctx
->flow
.in_port
= peer
->up
.ofp_port
;
5715 ctx
->flow
.metadata
= htonll(0);
5716 memset(&ctx
->flow
.tunnel
, 0, sizeof ctx
->flow
.tunnel
);
5717 memset(ctx
->flow
.regs
, 0, sizeof ctx
->flow
.regs
);
5719 in_port
= get_ofp_port(ctx
->ofproto
, ctx
->flow
.in_port
);
5720 special
= process_special(ctx
->ofproto
, &ctx
->flow
, in_port
,
5723 ctx
->slow
|= special
;
5724 } else if (!in_port
|| may_receive(in_port
, ctx
)) {
5725 if (!in_port
|| stp_forward_in_state(in_port
->stp_state
)) {
5726 xlate_table_action(ctx
, ctx
->flow
.in_port
, 0, true);
5728 /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
5729 * learning action look at the packet, then drop it. */
5730 struct flow old_base_flow
= ctx
->base_flow
;
5731 size_t old_size
= ctx
->odp_actions
->size
;
5732 xlate_table_action(ctx
, ctx
->flow
.in_port
, 0, true);
5733 ctx
->base_flow
= old_base_flow
;
5734 ctx
->odp_actions
->size
= old_size
;
5738 ctx
->flow
= old_flow
;
5739 ctx
->ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
5741 if (ctx
->resubmit_stats
) {
5742 netdev_vport_inc_tx(ofport
->up
.netdev
, ctx
->resubmit_stats
);
5743 netdev_vport_inc_rx(peer
->up
.netdev
, ctx
->resubmit_stats
);
5749 pdscp
= get_priority(ofport
, ctx
->flow
.skb_priority
);
5751 ctx
->flow
.nw_tos
&= ~IP_DSCP_MASK
;
5752 ctx
->flow
.nw_tos
|= pdscp
->dscp
;
5755 odp_port
= ofp_port_to_odp_port(ctx
->ofproto
, ofp_port
);
5756 if (ofport
->tnl_port
) {
5757 odp_port
= tnl_port_send(ofport
->tnl_port
, &ctx
->flow
);
5758 if (odp_port
== OVSP_NONE
) {
5759 xlate_report(ctx
, "Tunneling decided against output");
5763 if (ctx
->resubmit_stats
) {
5764 netdev_vport_inc_tx(ofport
->up
.netdev
, ctx
->resubmit_stats
);
5766 out_port
= odp_port
;
5767 commit_odp_tunnel_action(&ctx
->flow
, &ctx
->base_flow
,
5770 out_port
= vsp_realdev_to_vlandev(ctx
->ofproto
, odp_port
,
5771 ctx
->flow
.vlan_tci
);
5772 if (out_port
!= odp_port
) {
5773 ctx
->flow
.vlan_tci
= htons(0);
5776 commit_odp_actions(&ctx
->flow
, &ctx
->base_flow
, ctx
->odp_actions
);
5777 nl_msg_put_u32(ctx
->odp_actions
, OVS_ACTION_ATTR_OUTPUT
, out_port
);
5779 ctx
->sflow_odp_port
= odp_port
;
5780 ctx
->sflow_n_outputs
++;
5781 ctx
->nf_output_iface
= ofp_port
;
5782 ctx
->flow
.tunnel
.tun_id
= flow_tun_id
;
5783 ctx
->flow
.vlan_tci
= flow_vlan_tci
;
5784 ctx
->flow
.nw_tos
= flow_nw_tos
;
5788 compose_output_action(struct action_xlate_ctx
*ctx
, uint16_t ofp_port
)
5790 compose_output_action__(ctx
, ofp_port
, true);
5794 xlate_table_action(struct action_xlate_ctx
*ctx
,
5795 uint16_t in_port
, uint8_t table_id
, bool may_packet_in
)
5797 if (ctx
->recurse
< MAX_RESUBMIT_RECURSION
) {
5798 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
5799 struct rule_dpif
*rule
;
5800 uint16_t old_in_port
;
5801 uint8_t old_table_id
;
5803 old_table_id
= ctx
->table_id
;
5804 ctx
->table_id
= table_id
;
5806 /* Look up a flow with 'in_port' as the input port. */
5807 old_in_port
= ctx
->flow
.in_port
;
5808 ctx
->flow
.in_port
= in_port
;
5809 rule
= rule_dpif_lookup__(ofproto
, &ctx
->flow
, table_id
);
5812 if (table_id
> 0 && table_id
< N_TABLES
) {
5813 struct table_dpif
*table
= &ofproto
->tables
[table_id
];
5814 if (table
->other_table
) {
5815 ctx
->tags
|= (rule
&& rule
->tag
5817 : rule_calculate_tag(&ctx
->flow
,
5818 &table
->other_table
->mask
,
5823 /* Restore the original input port. Otherwise OFPP_NORMAL and
5824 * OFPP_IN_PORT will have surprising behavior. */
5825 ctx
->flow
.in_port
= old_in_port
;
5827 if (ctx
->resubmit_hook
) {
5828 ctx
->resubmit_hook(ctx
, rule
);
5831 if (rule
== NULL
&& may_packet_in
) {
5833 * check if table configuration flags
5834 * OFPTC_TABLE_MISS_CONTROLLER, default.
5835 * OFPTC_TABLE_MISS_CONTINUE,
5836 * OFPTC_TABLE_MISS_DROP
5837 * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
5839 rule
= rule_dpif_miss_rule(ofproto
, &ctx
->flow
);
5843 struct rule_dpif
*old_rule
= ctx
->rule
;
5845 if (ctx
->resubmit_stats
) {
5846 rule_credit_stats(rule
, ctx
->resubmit_stats
);
5851 do_xlate_actions(rule
->up
.ofpacts
, rule
->up
.ofpacts_len
, ctx
);
5852 ctx
->rule
= old_rule
;
5856 ctx
->table_id
= old_table_id
;
5858 static struct vlog_rate_limit recurse_rl
= VLOG_RATE_LIMIT_INIT(1, 1);
5860 VLOG_ERR_RL(&recurse_rl
, "resubmit actions recursed over %d times",
5861 MAX_RESUBMIT_RECURSION
);
5862 ctx
->max_resubmit_trigger
= true;
5867 xlate_ofpact_resubmit(struct action_xlate_ctx
*ctx
,
5868 const struct ofpact_resubmit
*resubmit
)
5873 in_port
= resubmit
->in_port
;
5874 if (in_port
== OFPP_IN_PORT
) {
5875 in_port
= ctx
->flow
.in_port
;
5878 table_id
= resubmit
->table_id
;
5879 if (table_id
== 255) {
5880 table_id
= ctx
->table_id
;
5883 xlate_table_action(ctx
, in_port
, table_id
, false);
5887 flood_packets(struct action_xlate_ctx
*ctx
, bool all
)
5889 struct ofport_dpif
*ofport
;
5891 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ctx
->ofproto
->up
.ports
) {
5892 uint16_t ofp_port
= ofport
->up
.ofp_port
;
5894 if (ofp_port
== ctx
->flow
.in_port
) {
5899 compose_output_action__(ctx
, ofp_port
, false);
5900 } else if (!(ofport
->up
.pp
.config
& OFPUTIL_PC_NO_FLOOD
)) {
5901 compose_output_action(ctx
, ofp_port
);
5905 ctx
->nf_output_iface
= NF_OUT_FLOOD
;
5909 execute_controller_action(struct action_xlate_ctx
*ctx
, int len
,
5910 enum ofp_packet_in_reason reason
,
5911 uint16_t controller_id
)
5913 struct ofputil_packet_in pin
;
5914 struct ofpbuf
*packet
;
5916 ctx
->slow
|= SLOW_CONTROLLER
;
5921 packet
= ofpbuf_clone(ctx
->packet
);
5923 if (packet
->l2
&& packet
->l3
) {
5924 struct eth_header
*eh
;
5925 uint16_t mpls_depth
;
5927 eth_pop_vlan(packet
);
5930 memcpy(eh
->eth_src
, ctx
->flow
.dl_src
, sizeof eh
->eth_src
);
5931 memcpy(eh
->eth_dst
, ctx
->flow
.dl_dst
, sizeof eh
->eth_dst
);
5933 if (ctx
->flow
.vlan_tci
& htons(VLAN_CFI
)) {
5934 eth_push_vlan(packet
, ctx
->flow
.vlan_tci
);
5937 mpls_depth
= eth_mpls_depth(packet
);
5939 if (mpls_depth
< ctx
->flow
.mpls_depth
) {
5940 push_mpls(packet
, ctx
->flow
.dl_type
, ctx
->flow
.mpls_lse
);
5941 } else if (mpls_depth
> ctx
->flow
.mpls_depth
) {
5942 pop_mpls(packet
, ctx
->flow
.dl_type
);
5943 } else if (mpls_depth
) {
5944 set_mpls_lse(packet
, ctx
->flow
.mpls_lse
);
5948 if (ctx
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
5949 packet_set_ipv4(packet
, ctx
->flow
.nw_src
, ctx
->flow
.nw_dst
,
5950 ctx
->flow
.nw_tos
, ctx
->flow
.nw_ttl
);
5954 if (ctx
->flow
.nw_proto
== IPPROTO_TCP
) {
5955 packet_set_tcp_port(packet
, ctx
->flow
.tp_src
,
5957 } else if (ctx
->flow
.nw_proto
== IPPROTO_UDP
) {
5958 packet_set_udp_port(packet
, ctx
->flow
.tp_src
,
5965 pin
.packet
= packet
->data
;
5966 pin
.packet_len
= packet
->size
;
5967 pin
.reason
= reason
;
5968 pin
.controller_id
= controller_id
;
5969 pin
.table_id
= ctx
->table_id
;
5970 pin
.cookie
= ctx
->rule
? ctx
->rule
->up
.flow_cookie
: 0;
5973 flow_get_metadata(&ctx
->flow
, &pin
.fmd
);
5975 connmgr_send_packet_in(ctx
->ofproto
->up
.connmgr
, &pin
);
5976 ofpbuf_delete(packet
);
5980 execute_mpls_push_action(struct action_xlate_ctx
*ctx
, ovs_be16 eth_type
)
5982 ovs_assert(eth_type_mpls(eth_type
));
5984 if (ctx
->base_flow
.mpls_depth
) {
5985 ctx
->flow
.mpls_lse
&= ~htonl(MPLS_BOS_MASK
);
5986 ctx
->flow
.mpls_depth
++;
5991 if (ctx
->flow
.dl_type
== htons(ETH_TYPE_IPV6
)) {
5992 label
= htonl(0x2); /* IPV6 Explicit Null. */
5994 label
= htonl(0x0); /* IPV4 Explicit Null. */
5996 tc
= (ctx
->flow
.nw_tos
& IP_DSCP_MASK
) >> 2;
5997 ttl
= ctx
->flow
.nw_ttl
? ctx
->flow
.nw_ttl
: 0x40;
5998 ctx
->flow
.mpls_lse
= set_mpls_lse_values(ttl
, tc
, 1, label
);
5999 ctx
->flow
.encap_dl_type
= ctx
->flow
.dl_type
;
6000 ctx
->flow
.mpls_depth
= 1;
6002 ctx
->flow
.dl_type
= eth_type
;
6006 execute_mpls_pop_action(struct action_xlate_ctx
*ctx
, ovs_be16 eth_type
)
6008 ovs_assert(eth_type_mpls(ctx
->flow
.dl_type
));
6009 ovs_assert(!eth_type_mpls(eth_type
));
6011 if (ctx
->flow
.mpls_depth
) {
6012 ctx
->flow
.mpls_depth
--;
6013 ctx
->flow
.mpls_lse
= htonl(0);
6014 if (!ctx
->flow
.mpls_depth
) {
6015 ctx
->flow
.dl_type
= eth_type
;
6016 ctx
->flow
.encap_dl_type
= htons(0);
6022 compose_dec_ttl(struct action_xlate_ctx
*ctx
, struct ofpact_cnt_ids
*ids
)
6024 if (ctx
->flow
.dl_type
!= htons(ETH_TYPE_IP
) &&
6025 ctx
->flow
.dl_type
!= htons(ETH_TYPE_IPV6
)) {
6029 if (ctx
->flow
.nw_ttl
> 1) {
6035 for (i
= 0; i
< ids
->n_controllers
; i
++) {
6036 execute_controller_action(ctx
, UINT16_MAX
, OFPR_INVALID_TTL
,
6040 /* Stop processing for current table. */
6046 execute_set_mpls_ttl_action(struct action_xlate_ctx
*ctx
, uint8_t ttl
)
6048 if (!eth_type_mpls(ctx
->flow
.dl_type
)) {
6052 set_mpls_lse_ttl(&ctx
->flow
.mpls_lse
, ttl
);
6057 execute_dec_mpls_ttl_action(struct action_xlate_ctx
*ctx
)
6059 uint8_t ttl
= mpls_lse_to_ttl(ctx
->flow
.mpls_lse
);
6061 if (!eth_type_mpls(ctx
->flow
.dl_type
)) {
6067 set_mpls_lse_ttl(&ctx
->flow
.mpls_lse
, ttl
);
6070 execute_controller_action(ctx
, UINT16_MAX
, OFPR_INVALID_TTL
, 0);
6072 /* Stop processing for current table. */
6078 xlate_output_action(struct action_xlate_ctx
*ctx
,
6079 uint16_t port
, uint16_t max_len
, bool may_packet_in
)
6081 uint16_t prev_nf_output_iface
= ctx
->nf_output_iface
;
6083 ctx
->nf_output_iface
= NF_OUT_DROP
;
6087 compose_output_action(ctx
, ctx
->flow
.in_port
);
6090 xlate_table_action(ctx
, ctx
->flow
.in_port
, 0, may_packet_in
);
6096 flood_packets(ctx
, false);
6099 flood_packets(ctx
, true);
6101 case OFPP_CONTROLLER
:
6102 execute_controller_action(ctx
, max_len
, OFPR_ACTION
, 0);
6108 if (port
!= ctx
->flow
.in_port
) {
6109 compose_output_action(ctx
, port
);
6111 xlate_report(ctx
, "skipping output to input port");
6116 if (prev_nf_output_iface
== NF_OUT_FLOOD
) {
6117 ctx
->nf_output_iface
= NF_OUT_FLOOD
;
6118 } else if (ctx
->nf_output_iface
== NF_OUT_DROP
) {
6119 ctx
->nf_output_iface
= prev_nf_output_iface
;
6120 } else if (prev_nf_output_iface
!= NF_OUT_DROP
&&
6121 ctx
->nf_output_iface
!= NF_OUT_FLOOD
) {
6122 ctx
->nf_output_iface
= NF_OUT_MULTI
;
static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
    if (port <= UINT16_MAX) {
        xlate_output_action(ctx, port, or->max_len, false);
    }
}
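
/* Translates an OpenFlow "enqueue" action: maps the OpenFlow queue in
 * 'enqueue' to a datapath priority, outputs to the requested port with that
 * priority, and then restores the flow's original skb_priority. */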
static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                   queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->flow.in_port;
    } else if (ofp_port == ctx->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->flow.skb_priority;
    ctx->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
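
/* Translates a "set_queue" action by rewriting the flow's skb_priority to the
 * priority that the datapath assigns to 'queue_id'. */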
static void
xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                queue_id, &skb_priority)) {
        ctx->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}
struct xlate_reg_state {
    ovs_be16 vlan_tci;
    ovs_be64 tun_id;
};

static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_bundle_action(struct action_xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}
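
/* Executes a "learn" action: builds the flow_mod described by 'learn' from
 * the current flow and applies it to the OpenFlow table, rate-limiting any
 * failure warnings. */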
static void
xlate_learn_action(struct action_xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}
/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
static void
xlate_fin_timeout(struct action_xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}
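
/* Returns true if 'port' may receive the packet being translated in 'ctx',
 * taking OFPUTIL_PC_NO_RECV/NO_RECV_STP and the port's STP state into
 * account. */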
static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}
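
/* Translates each ofpact in the 'ofpacts_len' bytes at 'ofpacts' into the
 * corresponding changes to 'ctx': datapath output actions, flow field
 * rewrites, resubmits, and so on. */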
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct action_xlate_ctx *ctx)
{
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      OFPR_ACTION,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            ctx->flow.vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            ctx->flow.vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DST:
            ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            break;

        case OFPACT_SET_L4_DST_PORT:
            ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            ctx->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
                                  &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (execute_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, ofpact_get_LEARN(a));
            }
            break;

        case OFPACT_FIN_TIMEOUT:
            ctx->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time. */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            ctx->flow.metadata &= ~metadata->mask;
            ctx->flow.metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_GOTO_TABLE: {
            /* XXX remove recursion */
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            ovs_assert(ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->flow.in_port, ogt->table_id, true);
            break;
        }
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}
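
/* Initializes 'ctx' for translating 'flow' within 'ofproto'.  'initial_tci'
 * gives the VLAN TCI with which the packet arrived from the datapath, 'rule'
 * is the rule being translated (if any), and 'packet' is the packet itself
 * when one is available. */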
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      ovs_be16 initial_tci, struct rule_dpif *rule,
                      uint8_t tcp_flags, const struct ofpbuf *packet)
{
    ovs_be64 initial_tun_id = flow->tunnel.tun_id;

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel 'flow' is largely cleared when transitioning between
     *   the input and output stages since it does not make sense to output
     *   a packet with the exact headers that it was received with (i.e.
     *   the destination IP is us).  The one exception is the tun_id, which
     *   is preserved to allow use in later resubmit lookups and loads into
     *   registers.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
    ctx->base_flow = ctx->flow;
    ctx->base_flow.vlan_tci = initial_tci;
    ctx->flow.tunnel.tun_id = initial_tun_id;
    ctx->rule = rule;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->tcp_flags = tcp_flags;
    ctx->resubmit_hook = NULL;
    ctx->report_hook = NULL;
    ctx->resubmit_stats = NULL;
}
6556 /* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
6557 * into datapath actions in 'odp_actions', using 'ctx'. */
6559 xlate_actions(struct action_xlate_ctx
*ctx
,
6560 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
6561 struct ofpbuf
*odp_actions
)
6563 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
6564 * that in the future we always keep a copy of the original flow for
6565 * tracing purposes. */
6566 static bool hit_resubmit_limit
;
6568 enum slow_path_reason special
;
6569 struct ofport_dpif
*in_port
;
6570 struct flow orig_flow
;
6572 COVERAGE_INC(ofproto_dpif_xlate
);
6574 ofpbuf_clear(odp_actions
);
6575 ofpbuf_reserve(odp_actions
, NL_A_U32_SIZE
);
6577 ctx
->odp_actions
= odp_actions
;
6580 ctx
->has_learn
= false;
6581 ctx
->has_normal
= false;
6582 ctx
->has_fin_timeout
= false;
6583 ctx
->nf_output_iface
= NF_OUT_DROP
;
6586 ctx
->max_resubmit_trigger
= false;
6587 ctx
->orig_skb_priority
= ctx
->flow
.skb_priority
;
6591 ofpbuf_use_stub(&ctx
->stack
, ctx
->init_stack
, sizeof ctx
->init_stack
);
6593 if (ctx
->ofproto
->has_mirrors
|| hit_resubmit_limit
) {
6594 /* Do this conditionally because the copy is expensive enough that it
6595 * shows up in profiles. */
6596 orig_flow
= ctx
->flow
;
6599 if (ctx
->flow
.nw_frag
& FLOW_NW_FRAG_ANY
) {
6600 switch (ctx
->ofproto
->up
.frag_handling
) {
6601 case OFPC_FRAG_NORMAL
:
6602 /* We must pretend that transport ports are unavailable. */
6603 ctx
->flow
.tp_src
= ctx
->base_flow
.tp_src
= htons(0);
6604 ctx
->flow
.tp_dst
= ctx
->base_flow
.tp_dst
= htons(0);
6607 case OFPC_FRAG_DROP
:
6610 case OFPC_FRAG_REASM
:
6613 case OFPC_FRAG_NX_MATCH
:
6614 /* Nothing to do. */
6617 case OFPC_INVALID_TTL_TO_CONTROLLER
:
6622 in_port
= get_ofp_port(ctx
->ofproto
, ctx
->flow
.in_port
);
6623 special
= process_special(ctx
->ofproto
, &ctx
->flow
, in_port
, ctx
->packet
);
6625 ctx
->slow
|= special
;
6627 static struct vlog_rate_limit trace_rl
= VLOG_RATE_LIMIT_INIT(1, 1);
6628 ovs_be16 initial_tci
= ctx
->base_flow
.vlan_tci
;
6629 uint32_t local_odp_port
;
6631 add_sflow_action(ctx
);
6633 if (!in_port
|| may_receive(in_port
, ctx
)) {
6634 do_xlate_actions(ofpacts
, ofpacts_len
, ctx
);
6636 /* We've let OFPP_NORMAL and the learning action look at the
6637 * packet, so drop it now if forwarding is disabled. */
6638 if (in_port
&& !stp_forward_in_state(in_port
->stp_state
)) {
6639 ofpbuf_clear(ctx
->odp_actions
);
6640 add_sflow_action(ctx
);
6644 if (ctx
->max_resubmit_trigger
&& !ctx
->resubmit_hook
) {
6645 if (!hit_resubmit_limit
) {
6646 /* We didn't record the original flow. Make sure we do from
6648 hit_resubmit_limit
= true;
6649 } else if (!VLOG_DROP_ERR(&trace_rl
)) {
6650 struct ds ds
= DS_EMPTY_INITIALIZER
;
6652 ofproto_trace(ctx
->ofproto
, &orig_flow
, ctx
->packet
,
6654 VLOG_ERR("Trace triggered by excessive resubmit "
6655 "recursion:\n%s", ds_cstr(&ds
));
6660 local_odp_port
= ofp_port_to_odp_port(ctx
->ofproto
, OFPP_LOCAL
);
6661 if (!connmgr_may_set_up_flow(ctx
->ofproto
->up
.connmgr
, &ctx
->flow
,
6663 ctx
->odp_actions
->data
,
6664 ctx
->odp_actions
->size
)) {
6665 ctx
->slow
|= SLOW_IN_BAND
;
6667 && connmgr_msg_in_hook(ctx
->ofproto
->up
.connmgr
, &ctx
->flow
,
6669 compose_output_action(ctx
, OFPP_LOCAL
);
6672 if (ctx
->ofproto
->has_mirrors
) {
6673 add_mirror_actions(ctx
, &orig_flow
);
6675 fix_sflow_action(ctx
);
6678 ofpbuf_uninit(&ctx
->stack
);
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'ctx', and discards the datapath actions. */
static void
xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
                               const struct ofpact *ofpacts,
                               size_t ofpacts_len)
{
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
    ofpbuf_uninit(&odp_actions);
}
static void
xlate_report(struct action_xlate_ctx *ctx, const char *s)
{
    if (ctx->report_hook) {
        ctx->report_hook(ctx, s);
    }
}
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
6734 /* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
6735 * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs
6738 * 'vid' should be the VID obtained from the 802.1Q header that was received as
6739 * part of a packet (specify 0 if there was no 802.1Q header), in the range
6742 input_vid_is_valid(uint16_t vid
, struct ofbundle
*in_bundle
, bool warn
)
6744 /* Allow any VID on the OFPP_NONE port. */
6745 if (in_bundle
== &ofpp_none_bundle
) {
6749 switch (in_bundle
->vlan_mode
) {
6750 case PORT_VLAN_ACCESS
:
6753 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
6754 VLOG_WARN_RL(&rl
, "bridge %s: dropping VLAN %"PRIu16
" tagged "
6755 "packet received on port %s configured as VLAN "
6756 "%"PRIu16
" access port",
6757 in_bundle
->ofproto
->up
.name
, vid
,
6758 in_bundle
->name
, in_bundle
->vlan
);
6764 case PORT_VLAN_NATIVE_UNTAGGED
:
6765 case PORT_VLAN_NATIVE_TAGGED
:
6767 /* Port must always carry its native VLAN. */
6771 case PORT_VLAN_TRUNK
:
6772 if (!ofbundle_includes_vlan(in_bundle
, vid
)) {
6774 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
6775 VLOG_WARN_RL(&rl
, "bridge %s: dropping VLAN %"PRIu16
" packet "
6776 "received on port %s not configured for trunking "
6778 in_bundle
->ofproto
->up
.name
, vid
,
6779 in_bundle
->name
, vid
);
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
6818 output_normal(struct action_xlate_ctx
*ctx
, const struct ofbundle
*out_bundle
,
6821 struct ofport_dpif
*port
;
6823 ovs_be16 tci
, old_tci
;
6825 vid
= output_vlan_to_vid(out_bundle
, vlan
);
6826 if (!out_bundle
->bond
) {
6827 port
= ofbundle_get_a_port(out_bundle
);
6829 port
= bond_choose_output_slave(out_bundle
->bond
, &ctx
->flow
,
6832 /* No slaves enabled, so drop packet. */
6837 old_tci
= ctx
->flow
.vlan_tci
;
6839 if (tci
|| out_bundle
->use_priority_tags
) {
6840 tci
|= ctx
->flow
.vlan_tci
& htons(VLAN_PCP_MASK
);
6842 tci
|= htons(VLAN_CFI
);
6845 ctx
->flow
.vlan_tci
= tci
;
6847 compose_output_action(ctx
, port
->up
.ofp_port
);
6848 ctx
->flow
.vlan_tci
= old_tci
;
static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}

static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
6886 add_mirror_actions(struct action_xlate_ctx
*ctx
, const struct flow
*orig_flow
)
6888 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
6889 mirror_mask_t mirrors
;
6890 struct ofbundle
*in_bundle
;
6893 const struct nlattr
*a
;
6896 in_bundle
= lookup_input_bundle(ctx
->ofproto
, orig_flow
->in_port
,
6897 ctx
->packet
!= NULL
, NULL
);
6901 mirrors
= in_bundle
->src_mirrors
;
6903 /* Drop frames on bundles reserved for mirroring. */
6904 if (in_bundle
->mirror_out
) {
6905 if (ctx
->packet
!= NULL
) {
6906 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
6907 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet received on port "
6908 "%s, which is reserved exclusively for mirroring",
6909 ctx
->ofproto
->up
.name
, in_bundle
->name
);
6915 vid
= vlan_tci_to_vid(orig_flow
->vlan_tci
);
6916 if (!input_vid_is_valid(vid
, in_bundle
, ctx
->packet
!= NULL
)) {
6919 vlan
= input_vid_to_vlan(in_bundle
, vid
);
6921 /* Look at the output ports to check for destination selections. */
6923 NL_ATTR_FOR_EACH (a
, left
, ctx
->odp_actions
->data
,
6924 ctx
->odp_actions
->size
) {
6925 enum ovs_action_attr type
= nl_attr_type(a
);
6926 struct ofport_dpif
*ofport
;
6928 if (type
!= OVS_ACTION_ATTR_OUTPUT
) {
6932 ofport
= get_odp_port(ofproto
, nl_attr_get_u32(a
));
6933 if (ofport
&& ofport
->bundle
) {
6934 mirrors
|= ofport
->bundle
->dst_mirrors
;
6942 /* Restore the original packet before adding the mirror actions. */
6943 ctx
->flow
= *orig_flow
;
6948 m
= ofproto
->mirrors
[mirror_mask_ffs(mirrors
) - 1];
6950 if (!vlan_is_mirrored(m
, vlan
)) {
6951 mirrors
= zero_rightmost_1bit(mirrors
);
6955 mirrors
&= ~m
->dup_mirrors
;
6956 ctx
->mirrors
|= m
->dup_mirrors
;
6958 output_normal(ctx
, m
->out
, vlan
);
6959 } else if (vlan
!= m
->out_vlan
6960 && !eth_addr_is_reserved(orig_flow
->dl_dst
)) {
6961 struct ofbundle
*bundle
;
6963 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
6964 if (ofbundle_includes_vlan(bundle
, m
->out_vlan
)
6965 && !bundle
->mirror_out
) {
6966 output_normal(ctx
, bundle
, m
->out_vlan
);
static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
7015 update_learning_table(struct ofproto_dpif
*ofproto
,
7016 const struct flow
*flow
, int vlan
,
7017 struct ofbundle
*in_bundle
)
7019 struct mac_entry
*mac
;
7021 /* Don't learn the OFPP_NONE port. */
7022 if (in_bundle
== &ofpp_none_bundle
) {
7026 if (!mac_learning_may_learn(ofproto
->ml
, flow
->dl_src
, vlan
)) {
7030 mac
= mac_learning_insert(ofproto
->ml
, flow
->dl_src
, vlan
);
7031 if (is_gratuitous_arp(flow
)) {
7032 /* We don't want to learn from gratuitous ARP packets that are
7033 * reflected back over bond slaves so we lock the learning table. */
7034 if (!in_bundle
->bond
) {
7035 mac_entry_set_grat_arp_lock(mac
);
7036 } else if (mac_entry_is_grat_arp_locked(mac
)) {
7041 if (mac_entry_is_new(mac
) || mac
->port
.p
!= in_bundle
) {
7042 /* The log messages here could actually be useful in debugging,
7043 * so keep the rate limit relatively high. */
7044 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(30, 300);
7045 VLOG_DBG_RL(&rl
, "bridge %s: learned that "ETH_ADDR_FMT
" is "
7046 "on port %s in VLAN %d",
7047 ofproto
->up
.name
, ETH_ADDR_ARGS(flow
->dl_src
),
7048 in_bundle
->name
, vlan
);
7050 mac
->port
.p
= in_bundle
;
7051 tag_set_add(&ofproto
->backer
->revalidate_set
,
7052 mac_learning_changed(ofproto
->ml
, mac
));
7056 static struct ofbundle
*
7057 lookup_input_bundle(const struct ofproto_dpif
*ofproto
, uint16_t in_port
,
7058 bool warn
, struct ofport_dpif
**in_ofportp
)
7060 struct ofport_dpif
*ofport
;
7062 /* Find the port and bundle for the received packet. */
7063 ofport
= get_ofp_port(ofproto
, in_port
);
7065 *in_ofportp
= ofport
;
7067 if (ofport
&& ofport
->bundle
) {
7068 return ofport
->bundle
;
7071 /* Special-case OFPP_NONE, which a controller may use as the ingress
7072 * port for traffic that it is sourcing. */
7073 if (in_port
== OFPP_NONE
) {
7074 return &ofpp_none_bundle
;
7077 /* Odd. A few possible reasons here:
7079 * - We deleted a port but there are still a few packets queued up
7082 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
7083 * we don't know about.
7085 * - The ofproto client didn't configure the port as part of a bundle.
7086 * This is particularly likely to happen if a packet was received on the
7087 * port after it was created, but before the client had a chance to
7088 * configure its bundle.
7091 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7093 VLOG_WARN_RL(&rl
, "bridge %s: received packet on unknown "
7094 "port %"PRIu16
, ofproto
->up
.name
, in_port
);
7099 /* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
7100 * dropped. Returns true if they may be forwarded, false if they should be
7103 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
7104 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
7106 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
7107 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
7108 * checked by input_vid_is_valid().
7110 * May also add tags to '*tags', although the current implementation only does
7111 * so in one special case.
7114 is_admissible(struct action_xlate_ctx
*ctx
, struct ofport_dpif
*in_port
,
7117 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
7118 struct flow
*flow
= &ctx
->flow
;
7119 struct ofbundle
*in_bundle
= in_port
->bundle
;
7121 /* Drop frames for reserved multicast addresses
7122 * only if forward_bpdu option is absent. */
7123 if (!ofproto
->up
.forward_bpdu
&& eth_addr_is_reserved(flow
->dl_dst
)) {
7124 xlate_report(ctx
, "packet has reserved destination MAC, dropping");
7128 if (in_bundle
->bond
) {
7129 struct mac_entry
*mac
;
7131 switch (bond_check_admissibility(in_bundle
->bond
, in_port
,
7132 flow
->dl_dst
, &ctx
->tags
)) {
7137 xlate_report(ctx
, "bonding refused admissibility, dropping");
7140 case BV_DROP_IF_MOVED
:
7141 mac
= mac_learning_lookup(ofproto
->ml
, flow
->dl_src
, vlan
, NULL
);
7142 if (mac
&& mac
->port
.p
!= in_bundle
&&
7143 (!is_gratuitous_arp(flow
)
7144 || mac_entry_is_grat_arp_locked(mac
))) {
7145 xlate_report(ctx
, "SLB bond thinks this packet looped back, "
7157 xlate_normal(struct action_xlate_ctx
*ctx
)
7159 struct ofport_dpif
*in_port
;
7160 struct ofbundle
*in_bundle
;
7161 struct mac_entry
*mac
;
7165 ctx
->has_normal
= true;
7167 in_bundle
= lookup_input_bundle(ctx
->ofproto
, ctx
->flow
.in_port
,
7168 ctx
->packet
!= NULL
, &in_port
);
7170 xlate_report(ctx
, "no input bundle, dropping");
7174 /* Drop malformed frames. */
7175 if (ctx
->flow
.dl_type
== htons(ETH_TYPE_VLAN
) &&
7176 !(ctx
->flow
.vlan_tci
& htons(VLAN_CFI
))) {
7177 if (ctx
->packet
!= NULL
) {
7178 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7179 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet with partial "
7180 "VLAN tag received on port %s",
7181 ctx
->ofproto
->up
.name
, in_bundle
->name
);
7183 xlate_report(ctx
, "partial VLAN tag, dropping");
7187 /* Drop frames on bundles reserved for mirroring. */
7188 if (in_bundle
->mirror_out
) {
7189 if (ctx
->packet
!= NULL
) {
7190 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7191 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet received on port "
7192 "%s, which is reserved exclusively for mirroring",
7193 ctx
->ofproto
->up
.name
, in_bundle
->name
);
7195 xlate_report(ctx
, "input port is mirror output port, dropping");
7200 vid
= vlan_tci_to_vid(ctx
->flow
.vlan_tci
);
7201 if (!input_vid_is_valid(vid
, in_bundle
, ctx
->packet
!= NULL
)) {
7202 xlate_report(ctx
, "disallowed VLAN VID for this input port, dropping");
7205 vlan
= input_vid_to_vlan(in_bundle
, vid
);
7207 /* Check other admissibility requirements. */
7208 if (in_port
&& !is_admissible(ctx
, in_port
, vlan
)) {
7212 /* Learn source MAC. */
7213 if (ctx
->may_learn
) {
7214 update_learning_table(ctx
->ofproto
, &ctx
->flow
, vlan
, in_bundle
);
7217 /* Determine output bundle. */
7218 mac
= mac_learning_lookup(ctx
->ofproto
->ml
, ctx
->flow
.dl_dst
, vlan
,
7221 if (mac
->port
.p
!= in_bundle
) {
7222 xlate_report(ctx
, "forwarding to learned port");
7223 output_normal(ctx
, mac
->port
.p
, vlan
);
7225 xlate_report(ctx
, "learned port is input port, dropping");
7228 struct ofbundle
*bundle
;
7230 xlate_report(ctx
, "no learned MAC for destination, flooding");
7231 HMAP_FOR_EACH (bundle
, hmap_node
, &ctx
->ofproto
->bundles
) {
7232 if (bundle
!= in_bundle
7233 && ofbundle_includes_vlan(bundle
, vlan
)
7234 && bundle
->floodable
7235 && !bundle
->mirror_out
) {
7236 output_normal(ctx
, bundle
, vlan
);
7239 ctx
->nf_output_iface
= NF_OUT_FLOOD
;
7243 /* Optimized flow revalidation.
7245 * It's a difficult problem, in general, to tell which facets need to have
7246 * their actions recalculated whenever the OpenFlow flow table changes. We
7247 * don't try to solve that general problem: for most kinds of OpenFlow flow
7248 * table changes, we recalculate the actions for every facet. This is
7249 * relatively expensive, but it's good enough if the OpenFlow flow table
7250 * doesn't change very often.
7252 * However, we can expect one particular kind of OpenFlow flow table change to
7253 * happen frequently: changes caused by MAC learning. To avoid wasting a lot
7254 * of CPU on revalidating every facet whenever MAC learning modifies the flow
7255 * table, we add a special case that applies to flow tables in which every rule
7256 * has the same form (that is, the same wildcards), except that the table is
7257 * also allowed to have a single "catch-all" flow that matches all packets. We
7258 * optimize this case by tagging all of the facets that resubmit into the table
7259 * and invalidating the same tag whenever a flow changes in that table. The
7260 * end result is that we revalidate just the facets that need it (and sometimes
7261 * a few more, but not all of the facets or even all of the facets that
7262 * resubmit to the table modified by MAC learning). */
/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
                   uint32_t secret)
{
    if (minimask_is_catchall(mask)) {
        return 0;
    } else {
        uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
        return tag_create_deterministic(hash);
    }
}
7278 /* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
7279 * taggability of that table.
7281 * This function must be called after *each* change to a flow table. If you
7282 * skip calling it on some changes then the pointer comparisons at the end can
7283 * be invalid if you get unlucky. For example, if a flow removal causes a
7284 * cls_table to be destroyed and then a flow insertion causes a cls_table with
7285 * different wildcards to be created with the same address, then this function
7286 * will incorrectly skip revalidation. */
7288 table_update_taggable(struct ofproto_dpif
*ofproto
, uint8_t table_id
)
7290 struct table_dpif
*table
= &ofproto
->tables
[table_id
];
7291 const struct oftable
*oftable
= &ofproto
->up
.tables
[table_id
];
7292 struct cls_table
*catchall
, *other
;
7293 struct cls_table
*t
;
7295 catchall
= other
= NULL
;
7297 switch (hmap_count(&oftable
->cls
.tables
)) {
7299 /* We could tag this OpenFlow table but it would make the logic a
7300 * little harder and it's a corner case that doesn't seem worth it
7306 HMAP_FOR_EACH (t
, hmap_node
, &oftable
->cls
.tables
) {
7307 if (cls_table_is_catchall(t
)) {
7309 } else if (!other
) {
7312 /* Indicate that we can't tag this by setting both tables to
7313 * NULL. (We know that 'catchall' is already NULL.) */
7320 /* Can't tag this table. */
7324 if (table
->catchall_table
!= catchall
|| table
->other_table
!= other
) {
7325 table
->catchall_table
= catchall
;
7326 table
->other_table
= other
;
7327 ofproto
->backer
->need_revalidate
= REV_FLOW_TABLE
;
7331 /* Given 'rule' that has changed in some way (either it is a rule being
7332 * inserted, a rule being deleted, or a rule whose actions are being
7333 * modified), marks facets for revalidation to ensure that packets will be
7334 * forwarded correctly according to the new state of the flow table.
7336 * This function must be called after *each* change to a flow table. See
7337 * the comment on table_update_taggable() for more information. */
7339 rule_invalidate(const struct rule_dpif
*rule
)
7341 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
7343 table_update_taggable(ofproto
, rule
->up
.table_id
);
7345 if (!ofproto
->backer
->need_revalidate
) {
7346 struct table_dpif
*table
= &ofproto
->tables
[rule
->up
.table_id
];
7348 if (table
->other_table
&& rule
->tag
) {
7349 tag_set_add(&ofproto
->backer
->revalidate_set
, rule
->tag
);
7351 ofproto
->backer
->need_revalidate
= REV_FLOW_TABLE
;
7357 set_frag_handling(struct ofproto
*ofproto_
,
7358 enum ofp_config_flags frag_handling
)
7360 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7361 if (frag_handling
!= OFPC_FRAG_REASM
) {
7362 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
7370 packet_out(struct ofproto
*ofproto_
, struct ofpbuf
*packet
,
7371 const struct flow
*flow
,
7372 const struct ofpact
*ofpacts
, size_t ofpacts_len
)
7374 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7375 struct odputil_keybuf keybuf
;
7376 struct dpif_flow_stats stats
;
7380 struct action_xlate_ctx ctx
;
7381 uint64_t odp_actions_stub
[1024 / 8];
7382 struct ofpbuf odp_actions
;
7384 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
7385 odp_flow_key_from_flow(&key
, flow
,
7386 ofp_port_to_odp_port(ofproto
, flow
->in_port
));
7388 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
7390 action_xlate_ctx_init(&ctx
, ofproto
, flow
, flow
->vlan_tci
, NULL
,
7391 packet_get_tcp_flags(packet
, flow
), packet
);
7392 ctx
.resubmit_stats
= &stats
;
7394 ofpbuf_use_stub(&odp_actions
,
7395 odp_actions_stub
, sizeof odp_actions_stub
);
7396 xlate_actions(&ctx
, ofpacts
, ofpacts_len
, &odp_actions
);
7397 dpif_execute(ofproto
->backer
->dpif
, key
.data
, key
.size
,
7398 odp_actions
.data
, odp_actions
.size
, packet
);
7399 ofpbuf_uninit(&odp_actions
);
7407 set_netflow(struct ofproto
*ofproto_
,
7408 const struct netflow_options
*netflow_options
)
7410 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7412 if (netflow_options
) {
7413 if (!ofproto
->netflow
) {
7414 ofproto
->netflow
= netflow_create();
7416 return netflow_set_options(ofproto
->netflow
, netflow_options
);
7418 netflow_destroy(ofproto
->netflow
);
7419 ofproto
->netflow
= NULL
;
static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
7434 send_active_timeout(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
7436 if (!facet_is_controller_flow(facet
) &&
7437 netflow_active_timeout_expired(ofproto
->netflow
, &facet
->nf_flow
)) {
7438 struct subfacet
*subfacet
;
7439 struct ofexpired expired
;
7441 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
7442 if (subfacet
->path
== SF_FAST_PATH
) {
7443 struct dpif_flow_stats stats
;
7445 subfacet_reinstall(subfacet
, &stats
);
7446 subfacet_update_stats(subfacet
, &stats
);
7450 expired
.flow
= facet
->flow
;
7451 expired
.packet_count
= facet
->packet_count
;
7452 expired
.byte_count
= facet
->byte_count
;
7453 expired
.used
= facet
->used
;
7454 netflow_expire(ofproto
->netflow
, &facet
->nf_flow
, &expired
);
static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct facet *facet;

    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        send_active_timeout(ofproto, facet);
    }
}
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}
7483 ofproto_unixctl_fdb_flush(struct unixctl_conn
*conn
, int argc
,
7484 const char *argv
[], void *aux OVS_UNUSED
)
7486 struct ofproto_dpif
*ofproto
;
7489 ofproto
= ofproto_dpif_lookup(argv
[1]);
7491 unixctl_command_reply_error(conn
, "no such bridge");
7494 mac_learning_flush(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
7496 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
7497 mac_learning_flush(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
7501 unixctl_command_reply(conn
, "table successfully flushed");
7505 ofproto_unixctl_fdb_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
7506 const char *argv
[], void *aux OVS_UNUSED
)
7508 struct ds ds
= DS_EMPTY_INITIALIZER
;
7509 const struct ofproto_dpif
*ofproto
;
7510 const struct mac_entry
*e
;
7512 ofproto
= ofproto_dpif_lookup(argv
[1]);
7514 unixctl_command_reply_error(conn
, "no such bridge");
7518 ds_put_cstr(&ds
, " port VLAN MAC Age\n");
7519 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
7520 struct ofbundle
*bundle
= e
->port
.p
;
7521 ds_put_format(&ds
, "%5d %4d "ETH_ADDR_FMT
" %3d\n",
7522 ofbundle_get_a_port(bundle
)->odp_port
,
7523 e
->vlan
, ETH_ADDR_ARGS(e
->mac
),
7524 mac_entry_age(ofproto
->ml
, e
));
7526 unixctl_command_reply(conn
, ds_cstr(&ds
));
7531 struct action_xlate_ctx ctx
;
7537 trace_format_rule(struct ds
*result
, uint8_t table_id
, int level
,
7538 const struct rule_dpif
*rule
)
7540 ds_put_char_multiple(result
, '\t', level
);
7542 ds_put_cstr(result
, "No match\n");
7546 ds_put_format(result
, "Rule: table=%"PRIu8
" cookie=%#"PRIx64
" ",
7547 table_id
, ntohll(rule
->up
.flow_cookie
));
7548 cls_rule_format(&rule
->up
.cr
, result
);
7549 ds_put_char(result
, '\n');
7551 ds_put_char_multiple(result
, '\t', level
);
7552 ds_put_cstr(result
, "OpenFlow ");
7553 ofpacts_format(rule
->up
.ofpacts
, rule
->up
.ofpacts_len
, result
);
7554 ds_put_char(result
, '\n');
7558 trace_format_flow(struct ds
*result
, int level
, const char *title
,
7559 struct trace_ctx
*trace
)
7561 ds_put_char_multiple(result
, '\t', level
);
7562 ds_put_format(result
, "%s: ", title
);
7563 if (flow_equal(&trace
->ctx
.flow
, &trace
->flow
)) {
7564 ds_put_cstr(result
, "unchanged");
7566 flow_format(result
, &trace
->ctx
.flow
);
7567 trace
->flow
= trace
->ctx
.flow
;
7569 ds_put_char(result
, '\n');
7573 trace_format_regs(struct ds
*result
, int level
, const char *title
,
7574 struct trace_ctx
*trace
)
7578 ds_put_char_multiple(result
, '\t', level
);
7579 ds_put_format(result
, "%s:", title
);
7580 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
7581 ds_put_format(result
, " reg%zu=0x%"PRIx32
, i
, trace
->flow
.regs
[i
]);
7583 ds_put_char(result
, '\n');
7587 trace_format_odp(struct ds
*result
, int level
, const char *title
,
7588 struct trace_ctx
*trace
)
7590 struct ofpbuf
*odp_actions
= trace
->ctx
.odp_actions
;
7592 ds_put_char_multiple(result
, '\t', level
);
7593 ds_put_format(result
, "%s: ", title
);
7594 format_odp_actions(result
, odp_actions
->data
, odp_actions
->size
);
7595 ds_put_char(result
, '\n');
static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}
static void
trace_report(struct action_xlate_ctx *ctx, const char *s)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char_multiple(result, '\t', ctx->recurse);
    ds_put_cstr(result, s);
    ds_put_char(result, '\n');
}
7623 ofproto_unixctl_trace(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
7624 void *aux OVS_UNUSED
)
7626 const char *dpname
= argv
[1];
7627 struct ofproto_dpif
*ofproto
;
7628 struct ofpbuf odp_key
;
7629 struct ofpbuf
*packet
;
7630 ovs_be16 initial_tci
;
7636 ofpbuf_init(&odp_key
, 0);
7639 ofproto
= ofproto_dpif_lookup(dpname
);
7641 unixctl_command_reply_error(conn
, "Unknown ofproto (use ofproto/list "
7645 if (argc
== 3 || (argc
== 4 && !strcmp(argv
[3], "-generate"))) {
7646 /* ofproto/trace dpname flow [-generate] */
7647 const char *flow_s
= argv
[2];
7648 const char *generate_s
= argv
[3];
7650 /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
7651 * flow. We guess which type it is based on whether 'flow_s' contains
7652 * an '(', since a datapath flow always contains '(') but an
7653 * OpenFlow-like flow should not (in fact it's allowed but I believe
7654 * that's not documented anywhere).
7656 * An alternative would be to try to parse 'flow_s' both ways, but then
7657 * it would be tricky giving a sensible error message. After all, do
7658 * you just say "syntax error" or do you present both error messages?
7659 * Both choices seem lousy. */
7660 if (strchr(flow_s
, '(')) {
7663 /* Convert string to datapath key. */
7664 ofpbuf_init(&odp_key
, 0);
7665 error
= odp_flow_key_from_string(flow_s
, NULL
, &odp_key
);
7667 unixctl_command_reply_error(conn
, "Bad flow syntax");
7671 /* XXX: Since we allow the user to specify an ofproto, it's
7672 * possible they will specify a different ofproto than the one the
7673 * port actually belongs too. Ideally we should simply remove the
7674 * ability to specify the ofproto. */
7675 if (ofproto_receive(ofproto
->backer
, NULL
, odp_key
.data
,
7676 odp_key
.size
, &flow
, NULL
, NULL
, NULL
,
7678 unixctl_command_reply_error(conn
, "Invalid flow");
7684 error_s
= parse_ofp_exact_flow(&flow
, argv
[2]);
7686 unixctl_command_reply_error(conn
, error_s
);
7691 initial_tci
= flow
.vlan_tci
;
7694 /* Generate a packet, if requested. */
7696 packet
= ofpbuf_new(0);
7697 flow_compose(packet
, &flow
);
7699 } else if (argc
== 7) {
7700 /* ofproto/trace dpname priority tun_id in_port mark packet */
7701 const char *priority_s
= argv
[2];
7702 const char *tun_id_s
= argv
[3];
7703 const char *in_port_s
= argv
[4];
7704 const char *mark_s
= argv
[5];
7705 const char *packet_s
= argv
[6];
7706 uint32_t in_port
= atoi(in_port_s
);
7707 ovs_be64 tun_id
= htonll(strtoull(tun_id_s
, NULL
, 0));
7708 uint32_t priority
= atoi(priority_s
);
7709 uint32_t mark
= atoi(mark_s
);
7712 msg
= eth_from_hex(packet_s
, &packet
);
7714 unixctl_command_reply_error(conn
, msg
);
7718 ds_put_cstr(&result
, "Packet: ");
7719 s
= ofp_packet_to_string(packet
->data
, packet
->size
);
7720 ds_put_cstr(&result
, s
);
7723 flow_extract(packet
, priority
, mark
, NULL
, in_port
, &flow
);
7724 flow
.tunnel
.tun_id
= tun_id
;
7725 initial_tci
= flow
.vlan_tci
;
7727 unixctl_command_reply_error(conn
, "Bad command syntax");
7731 ofproto_trace(ofproto
, &flow
, packet
, initial_tci
, &result
);
7732 unixctl_command_reply(conn
, ds_cstr(&result
));
7735 ds_destroy(&result
);
7736 ofpbuf_delete(packet
);
7737 ofpbuf_uninit(&odp_key
);
7741 ofproto_trace(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
7742 const struct ofpbuf
*packet
, ovs_be16 initial_tci
,
7745 struct rule_dpif
*rule
;
7747 ds_put_cstr(ds
, "Flow: ");
7748 flow_format(ds
, flow
);
7749 ds_put_char(ds
, '\n');
7751 rule
= rule_dpif_lookup(ofproto
, flow
);
7753 trace_format_rule(ds
, 0, 0, rule
);
7754 if (rule
== ofproto
->miss_rule
) {
7755 ds_put_cstr(ds
, "\nNo match, flow generates \"packet in\"s.\n");
7756 } else if (rule
== ofproto
->no_packet_in_rule
) {
7757 ds_put_cstr(ds
, "\nNo match, packets dropped because "
7758 "OFPPC_NO_PACKET_IN is set on in_port.\n");
7762 uint64_t odp_actions_stub
[1024 / 8];
7763 struct ofpbuf odp_actions
;
7765 struct trace_ctx trace
;
7768 tcp_flags
= packet
? packet_get_tcp_flags(packet
, flow
) : 0;
7771 ofpbuf_use_stub(&odp_actions
,
7772 odp_actions_stub
, sizeof odp_actions_stub
);
7773 action_xlate_ctx_init(&trace
.ctx
, ofproto
, flow
, initial_tci
,
7774 rule
, tcp_flags
, packet
);
7775 trace
.ctx
.resubmit_hook
= trace_resubmit
;
7776 trace
.ctx
.report_hook
= trace_report
;
7777 xlate_actions(&trace
.ctx
, rule
->up
.ofpacts
, rule
->up
.ofpacts_len
,
7780 ds_put_char(ds
, '\n');
7781 trace_format_flow(ds
, 0, "Final flow", &trace
);
7782 ds_put_cstr(ds
, "Datapath actions: ");
7783 format_odp_actions(ds
, odp_actions
.data
, odp_actions
.size
);
7784 ofpbuf_uninit(&odp_actions
);
7786 if (trace
.ctx
.slow
) {
7787 enum slow_path_reason slow
;
7789 ds_put_cstr(ds
, "\nThis flow is handled by the userspace "
7790 "slow path because it:");
7791 for (slow
= trace
.ctx
.slow
; slow
; ) {
7792 enum slow_path_reason bit
= rightmost_1bit(slow
);
7796 ds_put_cstr(ds
, "\n\t- Consists of CFM packets.");
7799 ds_put_cstr(ds
, "\n\t- Consists of LACP packets.");
7802 ds_put_cstr(ds
, "\n\t- Consists of STP packets.");
7805 ds_put_cstr(ds
, "\n\t- Needs in-band special case "
7808 ds_put_cstr(ds
, "\n\t (The datapath actions are "
7809 "incomplete--for complete actions, "
7810 "please supply a packet.)");
7813 case SLOW_CONTROLLER
:
7814 ds_put_cstr(ds
, "\n\t- Sends \"packet-in\" messages "
7815 "to the OpenFlow controller.");
7818 ds_put_cstr(ds
, "\n\t- Needs more specific matching "
7819 "than the datapath supports.");
7826 if (slow
& ~SLOW_MATCH
) {
7827 ds_put_cstr(ds
, "\nThe datapath actions above do not reflect "
7828 "the special slow-path processing.");
7835 ofproto_dpif_clog(struct unixctl_conn
*conn OVS_UNUSED
, int argc OVS_UNUSED
,
7836 const char *argv
[] OVS_UNUSED
, void *aux OVS_UNUSED
)
7839 unixctl_command_reply(conn
, NULL
);
7843 ofproto_dpif_unclog(struct unixctl_conn
*conn OVS_UNUSED
, int argc OVS_UNUSED
,
7844 const char *argv
[] OVS_UNUSED
, void *aux OVS_UNUSED
)
7847 unixctl_command_reply(conn
, NULL
);
7850 /* Runs a self-check of flow translations in 'ofproto'. Appends a message to
7851 * 'reply' describing the results. */
7853 ofproto_dpif_self_check__(struct ofproto_dpif
*ofproto
, struct ds
*reply
)
7855 struct facet
*facet
;
7859 HMAP_FOR_EACH (facet
, hmap_node
, &ofproto
->facets
) {
7860 if (!facet_check_consistency(facet
)) {
7865 ofproto
->backer
->need_revalidate
= REV_INCONSISTENCY
;
7869 ds_put_format(reply
, "%s: self-check failed (%d errors)\n",
7870 ofproto
->up
.name
, errors
);
7872 ds_put_format(reply
, "%s: self-check passed\n", ofproto
->up
.name
);
7877 ofproto_dpif_self_check(struct unixctl_conn
*conn
,
7878 int argc
, const char *argv
[], void *aux OVS_UNUSED
)
7880 struct ds reply
= DS_EMPTY_INITIALIZER
;
7881 struct ofproto_dpif
*ofproto
;
7884 ofproto
= ofproto_dpif_lookup(argv
[1]);
7886 unixctl_command_reply_error(conn
, "Unknown ofproto (use "
7887 "ofproto/list for help)");
7890 ofproto_dpif_self_check__(ofproto
, &reply
);
7892 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
7893 ofproto_dpif_self_check__(ofproto
, &reply
);
7897 unixctl_command_reply(conn
, ds_cstr(&reply
));
7901 /* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
7902 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
7903 * to destroy 'ofproto_shash' and free the returned value. */
7904 static const struct shash_node
**
7905 get_ofprotos(struct shash
*ofproto_shash
)
7907 const struct ofproto_dpif
*ofproto
;
7909 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
7910 char *name
= xasprintf("%s@%s", ofproto
->up
.type
, ofproto
->up
.name
);
7911 shash_add_nocopy(ofproto_shash
, name
, ofproto
);
7914 return shash_sort(ofproto_shash
);
7918 ofproto_unixctl_dpif_dump_dps(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
7919 const char *argv
[] OVS_UNUSED
,
7920 void *aux OVS_UNUSED
)
7922 struct ds ds
= DS_EMPTY_INITIALIZER
;
7923 struct shash ofproto_shash
;
7924 const struct shash_node
**sorted_ofprotos
;
7927 shash_init(&ofproto_shash
);
7928 sorted_ofprotos
= get_ofprotos(&ofproto_shash
);
7929 for (i
= 0; i
< shash_count(&ofproto_shash
); i
++) {
7930 const struct shash_node
*node
= sorted_ofprotos
[i
];
7931 ds_put_format(&ds
, "%s\n", node
->name
);
7934 shash_destroy(&ofproto_shash
);
7935 free(sorted_ofprotos
);
7937 unixctl_command_reply(conn
, ds_cstr(&ds
));
7942 show_dp_format(const struct ofproto_dpif
*ofproto
, struct ds
*ds
)
7944 struct dpif_dp_stats s
;
7945 const struct shash_node
**ports
;
7948 dpif_get_dp_stats(ofproto
->backer
->dpif
, &s
);
7950 ds_put_format(ds
, "%s (%s):\n", ofproto
->up
.name
,
7951 dpif_name(ofproto
->backer
->dpif
));
7952 /* xxx It would be better to show bridge-specific stats instead
7953 * xxx of dp ones. */
7955 "\tlookups: hit:%"PRIu64
" missed:%"PRIu64
" lost:%"PRIu64
"\n",
7956 s
.n_hit
, s
.n_missed
, s
.n_lost
);
7957 ds_put_format(ds
, "\tflows: %zu\n",
7958 hmap_count(&ofproto
->subfacets
));
7960 ports
= shash_sort(&ofproto
->up
.port_by_name
);
7961 for (i
= 0; i
< shash_count(&ofproto
->up
.port_by_name
); i
++) {
7962 const struct shash_node
*node
= ports
[i
];
7963 struct ofport
*ofport
= node
->data
;
7964 const char *name
= netdev_get_name(ofport
->netdev
);
7965 const char *type
= netdev_get_type(ofport
->netdev
);
7968 ds_put_format(ds
, "\t%s %u/", name
, ofport
->ofp_port
);
7970 odp_port
= ofp_port_to_odp_port(ofproto
, ofport
->ofp_port
);
7971 if (odp_port
!= OVSP_NONE
) {
7972 ds_put_format(ds
, "%"PRIu32
":", odp_port
);
7974 ds_put_cstr(ds
, "none:");
7977 if (strcmp(type
, "system")) {
7978 struct netdev
*netdev
;
7981 ds_put_format(ds
, " (%s", type
);
7983 error
= netdev_open(name
, type
, &netdev
);
7988 error
= netdev_get_config(netdev
, &config
);
7990 const struct smap_node
**nodes
;
7993 nodes
= smap_sort(&config
);
7994 for (i
= 0; i
< smap_count(&config
); i
++) {
7995 const struct smap_node
*node
= nodes
[i
];
7996 ds_put_format(ds
, "%c %s=%s", i
? ',' : ':',
7997 node
->key
, node
->value
);
8001 smap_destroy(&config
);
8003 netdev_close(netdev
);
8005 ds_put_char(ds
, ')');
8007 ds_put_char(ds
, '\n');
8013 ofproto_unixctl_dpif_show(struct unixctl_conn
*conn
, int argc
,
8014 const char *argv
[], void *aux OVS_UNUSED
)
8016 struct ds ds
= DS_EMPTY_INITIALIZER
;
8017 const struct ofproto_dpif
*ofproto
;
8021 for (i
= 1; i
< argc
; i
++) {
8022 ofproto
= ofproto_dpif_lookup(argv
[i
]);
8024 ds_put_format(&ds
, "Unknown bridge %s (use dpif/dump-dps "
8025 "for help)", argv
[i
]);
8026 unixctl_command_reply_error(conn
, ds_cstr(&ds
));
8029 show_dp_format(ofproto
, &ds
);
8032 struct shash ofproto_shash
;
8033 const struct shash_node
**sorted_ofprotos
;
8036 shash_init(&ofproto_shash
);
8037 sorted_ofprotos
= get_ofprotos(&ofproto_shash
);
8038 for (i
= 0; i
< shash_count(&ofproto_shash
); i
++) {
8039 const struct shash_node
*node
= sorted_ofprotos
[i
];
8040 show_dp_format(node
->data
, &ds
);
8043 shash_destroy(&ofproto_shash
);
8044 free(sorted_ofprotos
);
8047 unixctl_command_reply(conn
, ds_cstr(&ds
));
8052 ofproto_unixctl_dpif_dump_flows(struct unixctl_conn
*conn
,
8053 int argc OVS_UNUSED
, const char *argv
[],
8054 void *aux OVS_UNUSED
)
8056 struct ds ds
= DS_EMPTY_INITIALIZER
;
8057 const struct ofproto_dpif
*ofproto
;
8058 struct subfacet
*subfacet
;
8060 ofproto
= ofproto_dpif_lookup(argv
[1]);
8062 unixctl_command_reply_error(conn
, "no such bridge");
8066 update_stats(ofproto
->backer
);
8068 HMAP_FOR_EACH (subfacet
, hmap_node
, &ofproto
->subfacets
) {
8069 odp_flow_key_format(subfacet
->key
, subfacet
->key_len
, &ds
);
8071 ds_put_format(&ds
, ", packets:%"PRIu64
", bytes:%"PRIu64
", used:",
8072 subfacet
->dp_packet_count
, subfacet
->dp_byte_count
);
8073 if (subfacet
->used
) {
8074 ds_put_format(&ds
, "%.3fs",
8075 (time_msec() - subfacet
->used
) / 1000.0);
8077 ds_put_format(&ds
, "never");
8079 if (subfacet
->facet
->tcp_flags
) {
8080 ds_put_cstr(&ds
, ", flags:");
8081 packet_format_tcp_flags(&ds
, subfacet
->facet
->tcp_flags
);
8084 ds_put_cstr(&ds
, ", actions:");
8085 format_odp_actions(&ds
, subfacet
->actions
, subfacet
->actions_len
);
8086 ds_put_char(&ds
, '\n');
8089 unixctl_command_reply(conn
, ds_cstr(&ds
));
8094 ofproto_unixctl_dpif_del_flows(struct unixctl_conn
*conn
,
8095 int argc OVS_UNUSED
, const char *argv
[],
8096 void *aux OVS_UNUSED
)
8098 struct ds ds
= DS_EMPTY_INITIALIZER
;
8099 struct ofproto_dpif
*ofproto
;
8101 ofproto
= ofproto_dpif_lookup(argv
[1]);
8103 unixctl_command_reply_error(conn
, "no such bridge");
8107 flush(&ofproto
->up
);
8109 unixctl_command_reply(conn
, ds_cstr(&ds
));
8114 ofproto_dpif_unixctl_init(void)
8116 static bool registered
;
8122 unixctl_command_register(
8124 "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
8125 2, 6, ofproto_unixctl_trace
, NULL
);
8126 unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
8127 ofproto_unixctl_fdb_flush
, NULL
);
8128 unixctl_command_register("fdb/show", "bridge", 1, 1,
8129 ofproto_unixctl_fdb_show
, NULL
);
8130 unixctl_command_register("ofproto/clog", "", 0, 0,
8131 ofproto_dpif_clog
, NULL
);
8132 unixctl_command_register("ofproto/unclog", "", 0, 0,
8133 ofproto_dpif_unclog
, NULL
);
8134 unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
8135 ofproto_dpif_self_check
, NULL
);
8136 unixctl_command_register("dpif/dump-dps", "", 0, 0,
8137 ofproto_unixctl_dpif_dump_dps
, NULL
);
8138 unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX
,
8139 ofproto_unixctl_dpif_show
, NULL
);
8140 unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
8141 ofproto_unixctl_dpif_dump_flows
, NULL
);
8142 unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
8143 ofproto_unixctl_dpif_del_flows
, NULL
);
8146 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
8148 * This is deprecated. It is only for compatibility with broken device drivers
8149 * in old versions of Linux that do not properly support VLANs when VLAN
8150 * devices are not used. When broken device drivers are no longer in
8151 * widespread use, we will delete these interfaces. */
8154 set_realdev(struct ofport
*ofport_
, uint16_t realdev_ofp_port
, int vid
)
8156 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport_
->ofproto
);
8157 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
8159 if (realdev_ofp_port
== ofport
->realdev_ofp_port
8160 && vid
== ofport
->vlandev_vid
) {
8164 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
8166 if (ofport
->realdev_ofp_port
) {
8169 if (realdev_ofp_port
&& ofport
->bundle
) {
8170 /* vlandevs are enslaved to their realdevs, so they are not allowed to
8171 * themselves be part of a bundle. */
8172 bundle_set(ofport
->up
.ofproto
, ofport
->bundle
, NULL
);
8175 ofport
->realdev_ofp_port
= realdev_ofp_port
;
8176 ofport
->vlandev_vid
= vid
;
8178 if (realdev_ofp_port
) {
8179 vsp_add(ofport
, realdev_ofp_port
, vid
);
static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}
8191 /* Returns the ODP port number of the Linux VLAN device that corresponds to
8192 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
8193 * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
8194 * it would return the port number of eth0.9.
8196 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
8197 * function just returns its 'realdev_odp_port' argument. */
8199 vsp_realdev_to_vlandev(const struct ofproto_dpif
*ofproto
,
8200 uint32_t realdev_odp_port
, ovs_be16 vlan_tci
)
8202 if (!hmap_is_empty(&ofproto
->realdev_vid_map
)) {
8203 uint16_t realdev_ofp_port
;
8204 int vid
= vlan_tci_to_vid(vlan_tci
);
8205 const struct vlan_splinter
*vsp
;
8207 realdev_ofp_port
= odp_port_to_ofp_port(ofproto
, realdev_odp_port
);
8208 HMAP_FOR_EACH_WITH_HASH (vsp
, realdev_vid_node
,
8209 hash_realdev_vid(realdev_ofp_port
, vid
),
8210 &ofproto
->realdev_vid_map
) {
8211 if (vsp
->realdev_ofp_port
== realdev_ofp_port
8212 && vsp
->vid
== vid
) {
8213 return ofp_port_to_odp_port(ofproto
, vsp
->vlandev_ofp_port
);
8217 return realdev_odp_port
;
static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}
/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}
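
/* A minimal sketch of the reverse lookup, assuming eth0.9 has OpenFlow port 5
 * over eth0 with OpenFlow port 1 (the port numbers are assumptions):
 *
 *     int vid;
 *     uint16_t realdev = vsp_vlandev_to_realdev(ofproto, 5, &vid);
 *
 * Afterward 'realdev' is 1 and 'vid' is 9; for a port that is not a VLAN
 * splinter, 'realdev' would be 0 and 'vid' would be left untouched. */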
/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
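
/* A minimal sketch of the effect on a received flow, under the same assumed
 * numbering (eth0.9 is OpenFlow port 5 over eth0, OpenFlow port 1):
 *
 *     flow.in_port = 5;
 *     if (vsp_adjust_flow(ofproto, &flow)) {
 *         ...flow.in_port is now 1 and flow.vlan_tci is
 *         htons(9 | VLAN_CFI), so the rest of translation treats the
 *         packet as VLAN 9 traffic arriving on eth0...
 *     }
 */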
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}
static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
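
/* A sketch of the bookkeeping that vsp_add() and vsp_remove() maintain: a
 * single vlan_splinter record is linked into both maps at once, so either
 * direction of lookup finds it and removal must unlink it from both
 * ('vlandev_ofport' and 'eth0_ofp_port' are assumed names):
 *
 *     vsp_add(vlandev_ofport, eth0_ofp_port, 9);
 *         - vlandev_map is keyed on the VLAN device's OpenFlow port
 *         - realdev_vid_map is keyed on hash_realdev_vid(eth0_ofp_port, 9)
 *     vsp_remove(vlandev_ofport);
 */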
static uint32_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : OVSP_NONE;
}
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
{
    struct ofport_dpif *port;

    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
                             hash_int(odp_port, 0),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }

    return NULL;
}
static uint16_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
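
/* A minimal sketch of the two port-number spaces bridged above, assuming a
 * port whose datapath (ODP) number is 7 and whose OpenFlow number is 3 (both
 * numbers are assumptions):
 *
 *     ofp_port_to_odp_port(ofproto, 3) == 7
 *     odp_port_to_ofp_port(ofproto, 7) == 3
 *
 * An OpenFlow port with no datapath counterpart maps to OVSP_NONE, and an ODP
 * port that belongs to a different bridge on the same backer maps to
 * OFPP_NONE. */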
const struct ofproto_class ofproto_dpif_class = {
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_modify_actions,
    get_stp_port_status,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,