/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64
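/* Illustrative sketch (not part of the original file): the depth-limiting
 * pattern that MAX_RESUBMIT_RECURSION enforces, shown in isolation with a
 * hypothetical helper.  The depth counter plays the role of the 'recurse'
 * field in struct xlate_ctx below.  Guarded out; for exposition only. */
#if 0
static void
resubmit_depth_limit_example(int depth)
{
    if (depth >= MAX_RESUBMIT_RECURSION) {
        /* Too deep: stop translating instead of looping forever. */
        return;
    }

    /* ... translate the resubmitted table, recursing one level deeper ... */
    resubmit_depth_limit_example(depth + 1);
}
#endif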
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packet or bytes that can be obtained from any facet's
     *     packet_count or byte_count member or that can be obtained from the
     *     datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    tag_type tag;                /* Caches rule_calculate_tag() result. */

    struct list facets;          /* List of "struct facet"s. */
};
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            uint8_t table_id);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};
static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);
struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};
static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);
/* A controller may use OFPP_NONE as the ingress port to indicate that a
 * packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .vlan_mode = PORT_VLAN_TRUNK
};
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
/* Initial values of fields of the packet that may be changed during
 * flow processing and needed later. */
struct initial_vals {
    /* This is the value of vlan_tci in the packet as actually received from
     * dpif.  This is the same as the facet's flow.vlan_tci unless the packet
     * was received via a VLAN splinter.  In that case, this value is 0
     * (because the packet as actually received from the dpif had no 802.1Q
     * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
     * represents.
     *
     * This member should be removed when the VLAN splinters feature is no
     * longer needed. */
    ovs_be16 vlan_tci;
};
struct xlate_out {
    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

    uint64_t odp_actions_stub[256 / 8];
    struct ofpbuf odp_actions;
};
struct xlate_in {
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    struct initial_vals initial_vals;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */
    bool may_learn;

    /* The rule initiating translation or NULL. */
    struct rule_dpif *rule;

    /* The actions to translate.  If 'rule' is not NULL, these may be NULL. */
    const struct ofpact *ofpacts;
    size_t ofpacts_len;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' flag counts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing a
     * resubmit or OFPP_TABLE action.  In addition, disables logging of traces
     * when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    void (*resubmit_hook)(struct xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    const struct dpif_flow_stats *resubmit_stats;
};
/* Context used by xlate_actions() and its callees. */
struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct ofproto_dpif *ofproto;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};
static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
                          const struct flow *, const struct initial_vals *,
                          struct rule_dpif *, uint8_t tcp_flags,
                          const struct ofpbuf *);
static void xlate_out_uninit(struct xlate_out *);
static void xlate_actions(struct xlate_in *, struct xlate_out *);
static void xlate_actions_for_side_effects(struct xlate_in *);
static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *,
                                   const size_t cookie_size);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct xlate_ctx *ctx, const char *s);
/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath. */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    struct hmap_node hmap_node; /* In struct ofproto_dpif's 'subfacets' hmap. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */

    enum odp_key_fitness key_fitness;

    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Time created. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    enum subfacet_path path;    /* Installed in datapath? */
};
#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static int subfacet_install(struct subfacet *,
                            const struct ofpbuf *odp_actions,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
struct facet {
    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */

    struct list subfacets;
    long long int used;          /* Time last used; time created if not used. */

    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    struct xlate_out xout;

    /* Initial values of the packet that may be needed later. */
    struct initial_vals initial_vals;

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.  However, 'one_subfacet' may not
     * always be valid, since it could have been removed after newer
     * subfacets were pushed onto the 'subfacets' list.) */
    struct subfacet one_subfacet;

    long long int learn_rl;      /* Rate limiter for facet_learn(). */
};
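/* Illustrative sketch (not part of the original file): how the facet/subfacet
 * relationship described above is typically walked.  A facet owns one or more
 * subfacets on its 'subfacets' list, so aggregating datapath statistics for a
 * facet means summing over that list.  Guarded out; for exposition only. */
#if 0
static void
facet_example_sum_dp_stats(const struct facet *facet,
                           uint64_t *packetsp, uint64_t *bytesp)
{
    const struct subfacet *subfacet;
    uint64_t packets = 0, bytes = 0;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        packets += subfacet->dp_packet_count;
        bytes += subfacet->dp_byte_count;
    }
    *packetsp = packets;
    *bytesp = bytes;
}
#endif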
static struct facet *facet_create(const struct flow_miss *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
static void push_all_stats(void);

static bool facet_is_controller_flow(struct facet *);
struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};
/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint16_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint16_t realdev_ofp_port,
                                       ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}
static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
static void run_fast_rl(void);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};
/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};
/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows which have no associated ofproto; if they did, we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
    bool recv_set_enable;          /* Enables or disables receiving packets. */
};
/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);

static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
struct avg_subfacet_rates {
    double add_rate;     /* Moving average of new flows created per minute. */
    double del_rate;     /* Moving average of flows deleted per minute. */
};

static void show_dp_rates(struct ds *ds, const char *heading,
                          const struct avg_subfacet_rates *rates);
static void exp_mavg(double *avg, int base, double new);
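/* Illustrative sketch (not part of the original file): one way to implement
 * the exponentially weighted moving average declared above.  The larger
 * 'base' is, the more slowly the average tracks 'new'.  This is an assumption
 * about exp_mavg()'s behavior based only on its signature, shown for
 * exposition and guarded out. */
#if 0
static void
exp_mavg_example(double *avg, int base, double new)
{
    *avg = (*avg * (base - 1) + new) / base;
}
#endif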
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
    struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */

    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct dpif_ipfix *ipfix;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    struct hmap facets;
    struct hmap subfacets;
    struct governor *governor;
    long long int consistency_rl;

    /* Revalidation support. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    struct sset ports;             /* Set of standard port names. */
    struct sset ghost_ports;       /* Ports with no datapath port. */
    struct sset port_poll_set;     /* Queued names for port_poll() reply. */
    int port_poll_errno;           /* Last errno for port_poll() reply. */

    /* Per-ofproto dpif stats. */
    uint64_t n_missed;

    /* Subfacet statistics.
     *
     * These keep track of the total number of subfacets added and deleted and
     * flow life span.  They are useful for computing the flow rates stats
     * exposed via "ovs-appctl dpif/show".  The goal is to learn about
     * traffic patterns in ways that we can use later to improve Open vSwitch
     * performance in new situations. */
    long long int created;         /* Time this ofproto was created. */
    unsigned int max_n_subfacet;   /* Maximum number of flows. */

    /* The average number of subfacets... */
    struct avg_subfacet_rates hourly; /* ...over the last hour. */
    struct avg_subfacet_rates daily;  /* ...over the last day. */
    long long int last_minute;        /* Last time 'hourly' was updated. */

    /* Number of subfacets added or deleted since 'last_minute'. */
    unsigned int subfacet_add_count;
    unsigned int subfacet_del_count;

    /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
    unsigned long long int total_subfacet_add_count;
    unsigned long long int total_subfacet_del_count;

    /* Sum of the number of milliseconds that each subfacet existed,
     * over the subfacets that have been added and then later deleted. */
    unsigned long long int total_subfacet_life_span;

    /* Incremented by the number of currently existing subfacets, each
     * time we pull statistics from the kernel. */
    unsigned long long int total_subfacet_count;

    /* Number of times we pull statistics from the kernel. */
    unsigned long long int n_update_stats;
};
static unsigned long long int avg_subfacet_life_span(
                                        const struct ofproto_dpif *);
static double avg_subfacet_count(const struct ofproto_dpif *ofproto);
static void update_moving_averages(struct ofproto_dpif *ofproto);
static void update_max_subfacet_count(struct ofproto_dpif *ofproto);

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);
static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
*get_ofp_port(const struct ofproto_dpif
*,
783 static struct ofport_dpif
*get_odp_port(const struct ofproto_dpif
*,
785 static void ofproto_trace(struct ofproto_dpif
*, const struct flow
*,
786 const struct ofpbuf
*,
787 const struct initial_vals
*, struct ds
*);
789 /* Packet processing. */
790 static void update_learning_table(struct ofproto_dpif
*,
791 const struct flow
*, int vlan
,
794 #define FLOW_MISS_MAX_BATCH 50
795 static int handle_upcalls(struct dpif_backer
*, unsigned int max_batch
);
797 /* Flow expiration. */
798 static int expire(struct dpif_backer
*);
801 static void send_netflow_active_timeouts(struct ofproto_dpif
*);
804 static int send_packet(const struct ofport_dpif
*, struct ofpbuf
*packet
);
805 static size_t compose_sflow_action(const struct ofproto_dpif
*,
806 struct ofpbuf
*odp_actions
,
807 const struct flow
*, uint32_t odp_port
);
808 static void compose_ipfix_action(const struct ofproto_dpif
*,
809 struct ofpbuf
*odp_actions
,
810 const struct flow
*);
811 static void add_mirror_actions(struct xlate_ctx
*ctx
,
812 const struct flow
*flow
);
813 /* Global variables. */
814 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
816 /* Initial mappings of port to bridge mappings. */
817 static struct shash init_ofp_ports
= SHASH_INITIALIZER(&init_ofp_ports
);
819 /* Factory functions. */
822 init(const struct shash
*iface_hints
)
824 struct shash_node
*node
;
826 /* Make a local copy, since we don't own 'iface_hints' elements. */
827 SHASH_FOR_EACH(node
, iface_hints
) {
828 const struct iface_hint
*orig_hint
= node
->data
;
829 struct iface_hint
*new_hint
= xmalloc(sizeof *new_hint
);
831 new_hint
->br_name
= xstrdup(orig_hint
->br_name
);
832 new_hint
->br_type
= xstrdup(orig_hint
->br_type
);
833 new_hint
->ofp_port
= orig_hint
->ofp_port
;
835 shash_add(&init_ofp_ports
, node
->name
, new_hint
);
840 enumerate_types(struct sset
*types
)
842 dp_enumerate_types(types
);
846 enumerate_names(const char *type
, struct sset
*names
)
848 struct ofproto_dpif
*ofproto
;
851 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
852 if (strcmp(type
, ofproto
->up
.type
)) {
855 sset_add(names
, ofproto
->up
.name
);
862 del(const char *type
, const char *name
)
867 error
= dpif_open(name
, type
, &dpif
);
869 error
= dpif_delete(dpif
);
876 port_open_type(const char *datapath_type
, const char *port_type
)
878 return dpif_port_open_type(datapath_type
, port_type
);
881 /* Type functions. */
883 static struct ofproto_dpif
*
884 lookup_ofproto_dpif_by_port_name(const char *name
)
886 struct ofproto_dpif
*ofproto
;
888 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
889 if (sset_contains(&ofproto
->ports
, name
)) {
898 type_run(const char *type
)
900 static long long int push_timer
= LLONG_MIN
;
901 struct dpif_backer
*backer
;
905 backer
= shash_find_data(&all_dpif_backers
, type
);
907 /* This is not necessarily a problem, since backers are only
908 * created on demand. */
912 dpif_run(backer
->dpif
);
    /* The most natural place to push facet statistics is when they're pulled
     * from the datapath.  However, when there are many flows in the datapath,
     * this expensive operation can occur so frequently that it reduces our
     * ability to quickly set up flows.  To reduce the cost, we push statistics
     * here instead, at most once every couple of seconds. */
    if (time_msec() > push_timer) {
        push_timer = time_msec() + 2000;
        push_all_stats();
    }
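    /* Illustrative sketch (not part of the original file): the rate-limit
     * idiom used by 'push_timer' above and by fields such as 'consistency_rl'
     * and 'learn_rl' elsewhere in this file.  A timestamp starts at LLONG_MIN
     * so the guarded work runs on the first pass, then re-arms itself a fixed
     * number of milliseconds ahead.  Guarded out; for exposition only. */
#if 0
    {
        static long long int next_run = LLONG_MIN;

        if (time_msec() >= next_run) {
            next_run = time_msec() + 2000;  /* At most once per 2 seconds. */
            /* ... do the expensive work here ... */
        }
    }
#endif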
924 /* If vswitchd started with other_config:flow_restore_wait set as "true",
925 * and the configuration has now changed to "false", enable receiving
926 * packets from the datapath. */
927 if (!backer
->recv_set_enable
&& !ofproto_get_flow_restore_wait()) {
928 backer
->recv_set_enable
= true;
930 error
= dpif_recv_set(backer
->dpif
, backer
->recv_set_enable
);
932 VLOG_ERR("Failed to enable receiving packets in dpif.");
935 dpif_flow_flush(backer
->dpif
);
936 backer
->need_revalidate
= REV_RECONFIGURE
;
939 if (backer
->need_revalidate
940 || !tag_set_is_empty(&backer
->revalidate_set
)) {
941 struct tag_set revalidate_set
= backer
->revalidate_set
;
942 bool need_revalidate
= backer
->need_revalidate
;
943 struct ofproto_dpif
*ofproto
;
944 struct simap_node
*node
;
945 struct simap tmp_backers
;
947 /* Handle tunnel garbage collection. */
948 simap_init(&tmp_backers
);
949 simap_swap(&backer
->tnl_backers
, &tmp_backers
);
951 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
952 struct ofport_dpif
*iter
;
954 if (backer
!= ofproto
->backer
) {
958 HMAP_FOR_EACH (iter
, up
.hmap_node
, &ofproto
->up
.ports
) {
961 if (!iter
->tnl_port
) {
965 dp_port
= netdev_vport_get_dpif_port(iter
->up
.netdev
);
966 node
= simap_find(&tmp_backers
, dp_port
);
968 simap_put(&backer
->tnl_backers
, dp_port
, node
->data
);
969 simap_delete(&tmp_backers
, node
);
970 node
= simap_find(&backer
->tnl_backers
, dp_port
);
972 node
= simap_find(&backer
->tnl_backers
, dp_port
);
974 uint32_t odp_port
= UINT32_MAX
;
976 if (!dpif_port_add(backer
->dpif
, iter
->up
.netdev
,
978 simap_put(&backer
->tnl_backers
, dp_port
, odp_port
);
979 node
= simap_find(&backer
->tnl_backers
, dp_port
);
984 iter
->odp_port
= node
? node
->data
: OVSP_NONE
;
985 if (tnl_port_reconfigure(&iter
->up
, iter
->odp_port
,
987 backer
->need_revalidate
= REV_RECONFIGURE
;
992 SIMAP_FOR_EACH (node
, &tmp_backers
) {
993 dpif_port_del(backer
->dpif
, node
->data
);
995 simap_destroy(&tmp_backers
);
997 switch (backer
->need_revalidate
) {
998 case REV_RECONFIGURE
: COVERAGE_INC(rev_reconfigure
); break;
999 case REV_STP
: COVERAGE_INC(rev_stp
); break;
1000 case REV_PORT_TOGGLED
: COVERAGE_INC(rev_port_toggled
); break;
1001 case REV_FLOW_TABLE
: COVERAGE_INC(rev_flow_table
); break;
1002 case REV_INCONSISTENCY
: COVERAGE_INC(rev_inconsistency
); break;
1005 if (backer
->need_revalidate
) {
1006 /* Clear the drop_keys in case we should now be accepting some
1007 * formerly dropped flows. */
1008 drop_key_clear(backer
);
1011 /* Clear the revalidation flags. */
1012 tag_set_init(&backer
->revalidate_set
);
1013 backer
->need_revalidate
= 0;
1015 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
1016 struct facet
*facet
, *next
;
1018 if (ofproto
->backer
!= backer
) {
1022 HMAP_FOR_EACH_SAFE (facet
, next
, hmap_node
, &ofproto
->facets
) {
1024 || tag_set_intersects(&revalidate_set
, facet
->xout
.tags
)) {
1025 facet_revalidate(facet
);
1032 if (!backer
->recv_set_enable
) {
1033 /* Wake up before a max of 1000ms. */
1034 timer_set_duration(&backer
->next_expiration
, 1000);
1035 } else if (timer_expired(&backer
->next_expiration
)) {
1036 int delay
= expire(backer
);
1037 timer_set_duration(&backer
->next_expiration
, delay
);
1040 /* Check for port changes in the dpif. */
1041 while ((error
= dpif_port_poll(backer
->dpif
, &devname
)) == 0) {
1042 struct ofproto_dpif
*ofproto
;
1043 struct dpif_port port
;
1045 /* Don't report on the datapath's device. */
1046 if (!strcmp(devname
, dpif_base_name(backer
->dpif
))) {
1050 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
,
1051 &all_ofproto_dpifs
) {
1052 if (simap_contains(&ofproto
->backer
->tnl_backers
, devname
)) {
1057 ofproto
= lookup_ofproto_dpif_by_port_name(devname
);
1058 if (dpif_port_query_by_name(backer
->dpif
, devname
, &port
)) {
1059 /* The port was removed. If we know the datapath,
1060 * report it through poll_set(). If we don't, it may be
1061 * notifying us of a removal we initiated, so ignore it.
1062 * If there's a pending ENOBUFS, let it stand, since
1063 * everything will be reevaluated. */
1064 if (ofproto
&& ofproto
->port_poll_errno
!= ENOBUFS
) {
1065 sset_add(&ofproto
->port_poll_set
, devname
);
1066 ofproto
->port_poll_errno
= 0;
1068 } else if (!ofproto
) {
1069 /* The port was added, but we don't know with which
1070 * ofproto we should associate it. Delete it. */
1071 dpif_port_del(backer
->dpif
, port
.port_no
);
1073 dpif_port_destroy(&port
);
1079 if (error
!= EAGAIN
) {
1080 struct ofproto_dpif
*ofproto
;
1082 /* There was some sort of error, so propagate it to all
1083 * ofprotos that use this backer. */
1084 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
,
1085 &all_ofproto_dpifs
) {
1086 if (ofproto
->backer
== backer
) {
1087 sset_clear(&ofproto
->port_poll_set
);
1088 ofproto
->port_poll_errno
= error
;
static int
dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
{
    int work;

    /* If recv_set_enable is false, we should not handle upcalls. */
    if (!backer->recv_set_enable) {
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple batches
     * because in some cases handling a packet can cause another packet to be
     * queued almost immediately as part of the return flow.  Both
     * optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < max_batch) {
        int retval = handle_upcalls(backer, max_batch - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}
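/* Illustrative sketch (not part of the original file): the batching pattern
 * described above, reduced to its shape.  'handle_one_batch' is a
 * hypothetical stand-in for handle_upcalls(); it returns how many items it
 * processed, or 0 when there is nothing left to do.  Guarded out; for
 * exposition only. */
#if 0
static int
batch_until_budget_spent_example(int budget, int (*handle_one_batch)(int))
{
    int work = 0;

    while (work < budget) {
        int done = handle_one_batch(budget - work);
        if (done <= 0) {
            break;              /* Nothing left to do right now. */
        }
        work += done;           /* Charge this batch against the budget. */
    }
    return work;
}
#endif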
1128 type_run_fast(const char *type
)
1130 struct dpif_backer
*backer
;
1132 backer
= shash_find_data(&all_dpif_backers
, type
);
1134 /* This is not necessarily a problem, since backers are only
1135 * created on demand. */
1139 return dpif_backer_run_fast(backer
, FLOW_MISS_MAX_BATCH
);
1145 static long long int port_rl
= LLONG_MIN
;
1146 static unsigned int backer_rl
= 0;
1148 if (time_msec() >= port_rl
) {
1149 struct ofproto_dpif
*ofproto
;
1150 struct ofport_dpif
*ofport
;
1152 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
1154 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1155 port_run_fast(ofport
);
1158 port_rl
= time_msec() + 200;
    /* XXX: We have to be careful not to do too much work in this function.  If
     * we call dpif_backer_run_fast() too often, or with too large a batch,
     * performance improves significantly, but at a cost.  It's possible for the
     * number of flows in the datapath to increase without bound, and for poll
     * loops to take 10s of seconds.  The correct solution to this problem,
     * long term, is to separate flow miss handling into its own thread so it
     * isn't affected by revalidations and expirations.  Until then, this is
     * the best we can do. */
1169 if (++backer_rl
>= 10) {
1170 struct shash_node
*node
;
1173 SHASH_FOR_EACH (node
, &all_dpif_backers
) {
1174 dpif_backer_run_fast(node
->data
, 1);
1180 type_wait(const char *type
)
1182 struct dpif_backer
*backer
;
1184 backer
= shash_find_data(&all_dpif_backers
, type
);
1186 /* This is not necessarily a problem, since backers are only
1187 * created on demand. */
1191 timer_wait(&backer
->next_expiration
);
1194 /* Basic life-cycle. */
1196 static int add_internal_flows(struct ofproto_dpif
*);
1198 static struct ofproto
*
1201 struct ofproto_dpif
*ofproto
= xmalloc(sizeof *ofproto
);
1202 return &ofproto
->up
;
1206 dealloc(struct ofproto
*ofproto_
)
1208 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1213 close_dpif_backer(struct dpif_backer
*backer
)
1215 struct shash_node
*node
;
1217 ovs_assert(backer
->refcount
> 0);
1219 if (--backer
->refcount
) {
1223 drop_key_clear(backer
);
1224 hmap_destroy(&backer
->drop_keys
);
1226 simap_destroy(&backer
->tnl_backers
);
1227 hmap_destroy(&backer
->odp_to_ofport_map
);
1228 node
= shash_find(&all_dpif_backers
, backer
->type
);
1230 shash_delete(&all_dpif_backers
, node
);
1231 dpif_close(backer
->dpif
);
1236 /* Datapath port slated for removal from datapath. */
1237 struct odp_garbage
{
1238 struct list list_node
;
1243 open_dpif_backer(const char *type
, struct dpif_backer
**backerp
)
1245 struct dpif_backer
*backer
;
1246 struct dpif_port_dump port_dump
;
1247 struct dpif_port port
;
1248 struct shash_node
*node
;
1249 struct list garbage_list
;
1250 struct odp_garbage
*garbage
, *next
;
1256 backer
= shash_find_data(&all_dpif_backers
, type
);
1263 backer_name
= xasprintf("ovs-%s", type
);
1265 /* Remove any existing datapaths, since we assume we're the only
1266 * userspace controlling the datapath. */
1268 dp_enumerate_names(type
, &names
);
1269 SSET_FOR_EACH(name
, &names
) {
1270 struct dpif
*old_dpif
;
1272 /* Don't remove our backer if it exists. */
1273 if (!strcmp(name
, backer_name
)) {
1277 if (dpif_open(name
, type
, &old_dpif
)) {
1278 VLOG_WARN("couldn't open old datapath %s to remove it", name
);
1280 dpif_delete(old_dpif
);
1281 dpif_close(old_dpif
);
1284 sset_destroy(&names
);
1286 backer
= xmalloc(sizeof *backer
);
1288 error
= dpif_create_and_open(backer_name
, type
, &backer
->dpif
);
1291 VLOG_ERR("failed to open datapath of type %s: %s", type
,
1297 backer
->type
= xstrdup(type
);
1298 backer
->refcount
= 1;
1299 hmap_init(&backer
->odp_to_ofport_map
);
1300 hmap_init(&backer
->drop_keys
);
1301 timer_set_duration(&backer
->next_expiration
, 1000);
1302 backer
->need_revalidate
= 0;
1303 simap_init(&backer
->tnl_backers
);
1304 tag_set_init(&backer
->revalidate_set
);
1305 backer
->recv_set_enable
= !ofproto_get_flow_restore_wait();
1308 if (backer
->recv_set_enable
) {
1309 dpif_flow_flush(backer
->dpif
);
1312 /* Loop through the ports already on the datapath and remove any
1313 * that we don't need anymore. */
1314 list_init(&garbage_list
);
1315 dpif_port_dump_start(&port_dump
, backer
->dpif
);
1316 while (dpif_port_dump_next(&port_dump
, &port
)) {
1317 node
= shash_find(&init_ofp_ports
, port
.name
);
1318 if (!node
&& strcmp(port
.name
, dpif_base_name(backer
->dpif
))) {
1319 garbage
= xmalloc(sizeof *garbage
);
1320 garbage
->odp_port
= port
.port_no
;
1321 list_push_front(&garbage_list
, &garbage
->list_node
);
1324 dpif_port_dump_done(&port_dump
);
1326 LIST_FOR_EACH_SAFE (garbage
, next
, list_node
, &garbage_list
) {
1327 dpif_port_del(backer
->dpif
, garbage
->odp_port
);
1328 list_remove(&garbage
->list_node
);
1332 shash_add(&all_dpif_backers
, type
, backer
);
1334 error
= dpif_recv_set(backer
->dpif
, backer
->recv_set_enable
);
1336 VLOG_ERR("failed to listen on datapath of type %s: %s",
1337 type
, strerror(error
));
1338 close_dpif_backer(backer
);
1346 construct(struct ofproto
*ofproto_
)
1348 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1349 struct shash_node
*node
, *next
;
1354 error
= open_dpif_backer(ofproto
->up
.type
, &ofproto
->backer
);
1359 max_ports
= dpif_get_max_ports(ofproto
->backer
->dpif
);
1360 ofproto_init_max_ports(ofproto_
, MIN(max_ports
, OFPP_MAX
));
1362 ofproto
->netflow
= NULL
;
1363 ofproto
->sflow
= NULL
;
1364 ofproto
->ipfix
= NULL
;
1365 ofproto
->stp
= NULL
;
1366 hmap_init(&ofproto
->bundles
);
1367 ofproto
->ml
= mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME
);
1368 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1369 ofproto
->mirrors
[i
] = NULL
;
1371 ofproto
->has_bonded_bundles
= false;
1373 hmap_init(&ofproto
->facets
);
1374 hmap_init(&ofproto
->subfacets
);
1375 ofproto
->governor
= NULL
;
1376 ofproto
->consistency_rl
= LLONG_MIN
;
1378 for (i
= 0; i
< N_TABLES
; i
++) {
1379 struct table_dpif
*table
= &ofproto
->tables
[i
];
1381 table
->catchall_table
= NULL
;
1382 table
->other_table
= NULL
;
1383 table
->basis
= random_uint32();
1386 list_init(&ofproto
->completions
);
1388 ofproto_dpif_unixctl_init();
1390 ofproto
->has_mirrors
= false;
1391 ofproto
->has_bundle_action
= false;
1393 hmap_init(&ofproto
->vlandev_map
);
1394 hmap_init(&ofproto
->realdev_vid_map
);
1396 sset_init(&ofproto
->ports
);
1397 sset_init(&ofproto
->ghost_ports
);
1398 sset_init(&ofproto
->port_poll_set
);
1399 ofproto
->port_poll_errno
= 0;
1401 SHASH_FOR_EACH_SAFE (node
, next
, &init_ofp_ports
) {
1402 struct iface_hint
*iface_hint
= node
->data
;
1404 if (!strcmp(iface_hint
->br_name
, ofproto
->up
.name
)) {
1405 /* Check if the datapath already has this port. */
1406 if (dpif_port_exists(ofproto
->backer
->dpif
, node
->name
)) {
1407 sset_add(&ofproto
->ports
, node
->name
);
1410 free(iface_hint
->br_name
);
1411 free(iface_hint
->br_type
);
1413 shash_delete(&init_ofp_ports
, node
);
1417 hmap_insert(&all_ofproto_dpifs
, &ofproto
->all_ofproto_dpifs_node
,
1418 hash_string(ofproto
->up
.name
, 0));
1419 memset(&ofproto
->stats
, 0, sizeof ofproto
->stats
);
1421 ofproto_init_tables(ofproto_
, N_TABLES
);
1422 error
= add_internal_flows(ofproto
);
1423 ofproto
->up
.tables
[TBL_INTERNAL
].flags
= OFTABLE_HIDDEN
| OFTABLE_READONLY
;
1426 ofproto
->n_missed
= 0;
1428 ofproto
->max_n_subfacet
= 0;
1429 ofproto
->created
= time_msec();
1430 ofproto
->last_minute
= ofproto
->created
;
1431 memset(&ofproto
->hourly
, 0, sizeof ofproto
->hourly
);
1432 memset(&ofproto
->daily
, 0, sizeof ofproto
->daily
);
1433 ofproto
->subfacet_add_count
= 0;
1434 ofproto
->subfacet_del_count
= 0;
1435 ofproto
->total_subfacet_add_count
= 0;
1436 ofproto
->total_subfacet_del_count
= 0;
1437 ofproto
->total_subfacet_life_span
= 0;
1438 ofproto
->total_subfacet_count
= 0;
1439 ofproto
->n_update_stats
= 0;
1445 add_internal_flow(struct ofproto_dpif
*ofproto
, int id
,
1446 const struct ofpbuf
*ofpacts
, struct rule_dpif
**rulep
)
1448 struct ofputil_flow_mod fm
;
1451 match_init_catchall(&fm
.match
);
1453 match_set_reg(&fm
.match
, 0, id
);
1454 fm
.new_cookie
= htonll(0);
1455 fm
.cookie
= htonll(0);
1456 fm
.cookie_mask
= htonll(0);
1457 fm
.table_id
= TBL_INTERNAL
;
1458 fm
.command
= OFPFC_ADD
;
1459 fm
.idle_timeout
= 0;
1460 fm
.hard_timeout
= 0;
1464 fm
.ofpacts
= ofpacts
->data
;
1465 fm
.ofpacts_len
= ofpacts
->size
;
1467 error
= ofproto_flow_mod(&ofproto
->up
, &fm
);
1469 VLOG_ERR_RL(&rl
, "failed to add internal flow %d (%s)",
1470 id
, ofperr_to_string(error
));
1474 *rulep
= rule_dpif_lookup__(ofproto
, &fm
.match
.flow
, TBL_INTERNAL
);
1475 ovs_assert(*rulep
!= NULL
);
1481 add_internal_flows(struct ofproto_dpif
*ofproto
)
1483 struct ofpact_controller
*controller
;
1484 uint64_t ofpacts_stub
[128 / 8];
1485 struct ofpbuf ofpacts
;
1489 ofpbuf_use_stack(&ofpacts
, ofpacts_stub
, sizeof ofpacts_stub
);
1492 controller
= ofpact_put_CONTROLLER(&ofpacts
);
1493 controller
->max_len
= UINT16_MAX
;
1494 controller
->controller_id
= 0;
1495 controller
->reason
= OFPR_NO_MATCH
;
1496 ofpact_pad(&ofpacts
);
1498 error
= add_internal_flow(ofproto
, id
++, &ofpacts
, &ofproto
->miss_rule
);
1503 ofpbuf_clear(&ofpacts
);
1504 error
= add_internal_flow(ofproto
, id
++, &ofpacts
,
1505 &ofproto
->no_packet_in_rule
);
1510 error
= add_internal_flow(ofproto
, id
++, &ofpacts
,
1511 &ofproto
->drop_frags_rule
);
1516 complete_operations(struct ofproto_dpif
*ofproto
)
1518 struct dpif_completion
*c
, *next
;
1520 LIST_FOR_EACH_SAFE (c
, next
, list_node
, &ofproto
->completions
) {
1521 ofoperation_complete(c
->op
, 0);
1522 list_remove(&c
->list_node
);
1528 destruct(struct ofproto
*ofproto_
)
1530 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1531 struct rule_dpif
*rule
, *next_rule
;
1532 struct oftable
*table
;
1535 hmap_remove(&all_ofproto_dpifs
, &ofproto
->all_ofproto_dpifs_node
);
1536 complete_operations(ofproto
);
1538 OFPROTO_FOR_EACH_TABLE (table
, &ofproto
->up
) {
1539 struct cls_cursor cursor
;
1541 cls_cursor_init(&cursor
, &table
->cls
, NULL
);
1542 CLS_CURSOR_FOR_EACH_SAFE (rule
, next_rule
, up
.cr
, &cursor
) {
1543 ofproto_rule_destroy(&rule
->up
);
1547 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1548 mirror_destroy(ofproto
->mirrors
[i
]);
1551 netflow_destroy(ofproto
->netflow
);
1552 dpif_sflow_destroy(ofproto
->sflow
);
1553 hmap_destroy(&ofproto
->bundles
);
1554 mac_learning_destroy(ofproto
->ml
);
1556 hmap_destroy(&ofproto
->facets
);
1557 hmap_destroy(&ofproto
->subfacets
);
1558 governor_destroy(ofproto
->governor
);
1560 hmap_destroy(&ofproto
->vlandev_map
);
1561 hmap_destroy(&ofproto
->realdev_vid_map
);
1563 sset_destroy(&ofproto
->ports
);
1564 sset_destroy(&ofproto
->ghost_ports
);
1565 sset_destroy(&ofproto
->port_poll_set
);
1567 close_dpif_backer(ofproto
->backer
);
1571 run_fast(struct ofproto
*ofproto_
)
1573 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1574 struct ofport_dpif
*ofport
;
1576 /* Do not perform any periodic activity required by 'ofproto' while
1577 * waiting for flow restore to complete. */
1578 if (ofproto_get_flow_restore_wait()) {
1582 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1583 port_run_fast(ofport
);
1590 run(struct ofproto
*ofproto_
)
1592 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1593 struct ofport_dpif
*ofport
;
1594 struct ofbundle
*bundle
;
1598 complete_operations(ofproto
);
1601 /* Do not perform any periodic activity below required by 'ofproto' while
1602 * waiting for flow restore to complete. */
1603 if (ofproto_get_flow_restore_wait()) {
1607 error
= run_fast(ofproto_
);
1612 if (ofproto
->netflow
) {
1613 if (netflow_run(ofproto
->netflow
)) {
1614 send_netflow_active_timeouts(ofproto
);
1617 if (ofproto
->sflow
) {
1618 dpif_sflow_run(ofproto
->sflow
);
1621 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1624 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1629 mac_learning_run(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
1631 /* Check the consistency of a random facet, to aid debugging. */
1632 if (time_msec() >= ofproto
->consistency_rl
1633 && !hmap_is_empty(&ofproto
->facets
)
1634 && !ofproto
->backer
->need_revalidate
) {
1635 struct facet
*facet
;
1637 ofproto
->consistency_rl
= time_msec() + 250;
1639 facet
= CONTAINER_OF(hmap_random_node(&ofproto
->facets
),
1640 struct facet
, hmap_node
);
1641 if (!tag_set_intersects(&ofproto
->backer
->revalidate_set
,
1642 facet
->xout
.tags
)) {
1643 if (!facet_check_consistency(facet
)) {
1644 ofproto
->backer
->need_revalidate
= REV_INCONSISTENCY
;
1649 if (ofproto
->governor
) {
1652 governor_run(ofproto
->governor
);
1654 /* If the governor has shrunk to its minimum size and the number of
1655 * subfacets has dwindled, then drop the governor entirely.
1657 * For hysteresis, the number of subfacets to drop the governor is
1658 * smaller than the number needed to trigger its creation. */
1659 n_subfacets
= hmap_count(&ofproto
->subfacets
);
1660 if (n_subfacets
* 4 < ofproto
->up
.flow_eviction_threshold
1661 && governor_is_idle(ofproto
->governor
)) {
1662 governor_destroy(ofproto
->governor
);
1663 ofproto
->governor
= NULL
;
1671 wait(struct ofproto
*ofproto_
)
1673 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1674 struct ofport_dpif
*ofport
;
1675 struct ofbundle
*bundle
;
1677 if (!clogged
&& !list_is_empty(&ofproto
->completions
)) {
1678 poll_immediate_wake();
1681 if (ofproto_get_flow_restore_wait()) {
1685 dpif_wait(ofproto
->backer
->dpif
);
1686 dpif_recv_wait(ofproto
->backer
->dpif
);
1687 if (ofproto
->sflow
) {
1688 dpif_sflow_wait(ofproto
->sflow
);
1690 if (!tag_set_is_empty(&ofproto
->backer
->revalidate_set
)) {
1691 poll_immediate_wake();
1693 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1696 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1697 bundle_wait(bundle
);
1699 if (ofproto
->netflow
) {
1700 netflow_wait(ofproto
->netflow
);
1702 mac_learning_wait(ofproto
->ml
);
1704 if (ofproto
->backer
->need_revalidate
) {
1705 /* Shouldn't happen, but if it does just go around again. */
1706 VLOG_DBG_RL(&rl
, "need revalidate in ofproto_wait_cb()");
1707 poll_immediate_wake();
1709 if (ofproto
->governor
) {
1710 governor_wait(ofproto
->governor
);
1715 get_memory_usage(const struct ofproto
*ofproto_
, struct simap
*usage
)
1717 const struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1719 simap_increase(usage
, "facets", hmap_count(&ofproto
->facets
));
1720 simap_increase(usage
, "subfacets", hmap_count(&ofproto
->subfacets
));
1724 flush(struct ofproto
*ofproto_
)
1726 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1727 struct subfacet
*subfacet
, *next_subfacet
;
1728 struct subfacet
*batch
[SUBFACET_DESTROY_MAX_BATCH
];
1732 HMAP_FOR_EACH_SAFE (subfacet
, next_subfacet
, hmap_node
,
1733 &ofproto
->subfacets
) {
1734 if (subfacet
->path
!= SF_NOT_INSTALLED
) {
1735 batch
[n_batch
++] = subfacet
;
1736 if (n_batch
>= SUBFACET_DESTROY_MAX_BATCH
) {
1737 subfacet_destroy_batch(ofproto
, batch
, n_batch
);
1741 subfacet_destroy(subfacet
);
1746 subfacet_destroy_batch(ofproto
, batch
, n_batch
);
1751 get_features(struct ofproto
*ofproto_ OVS_UNUSED
,
1752 bool *arp_match_ip
, enum ofputil_action_bitmap
*actions
)
1754 *arp_match_ip
= true;
1755 *actions
= (OFPUTIL_A_OUTPUT
|
1756 OFPUTIL_A_SET_VLAN_VID
|
1757 OFPUTIL_A_SET_VLAN_PCP
|
1758 OFPUTIL_A_STRIP_VLAN
|
1759 OFPUTIL_A_SET_DL_SRC
|
1760 OFPUTIL_A_SET_DL_DST
|
1761 OFPUTIL_A_SET_NW_SRC
|
1762 OFPUTIL_A_SET_NW_DST
|
1763 OFPUTIL_A_SET_NW_TOS
|
1764 OFPUTIL_A_SET_TP_SRC
|
1765 OFPUTIL_A_SET_TP_DST
|
1770 get_tables(struct ofproto
*ofproto_
, struct ofp12_table_stats
*ots
)
1772 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1773 struct dpif_dp_stats s
;
1774 uint64_t n_miss
, n_no_pkt_in
, n_bytes
, n_dropped_frags
;
1777 strcpy(ots
->name
, "classifier");
1779 dpif_get_dp_stats(ofproto
->backer
->dpif
, &s
);
1780 rule_get_stats(&ofproto
->miss_rule
->up
, &n_miss
, &n_bytes
);
1781 rule_get_stats(&ofproto
->no_packet_in_rule
->up
, &n_no_pkt_in
, &n_bytes
);
1782 rule_get_stats(&ofproto
->drop_frags_rule
->up
, &n_dropped_frags
, &n_bytes
);
1784 n_lookup
= s
.n_hit
+ s
.n_missed
- n_dropped_frags
;
1785 ots
->lookup_count
= htonll(n_lookup
);
1786 ots
->matched_count
= htonll(n_lookup
- n_miss
- n_no_pkt_in
);
1789 static struct ofport
*
1792 struct ofport_dpif
*port
= xmalloc(sizeof *port
);
1797 port_dealloc(struct ofport
*port_
)
1799 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1804 port_construct(struct ofport
*port_
)
1806 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1807 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1808 const struct netdev
*netdev
= port
->up
.netdev
;
1809 struct dpif_port dpif_port
;
1812 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1813 port
->bundle
= NULL
;
1816 port
->tag
= tag_create_random();
1817 port
->may_enable
= true;
1818 port
->stp_port
= NULL
;
1819 port
->stp_state
= STP_DISABLED
;
1820 port
->tnl_port
= NULL
;
1821 hmap_init(&port
->priorities
);
1822 port
->realdev_ofp_port
= 0;
1823 port
->vlandev_vid
= 0;
1824 port
->carrier_seq
= netdev_get_carrier_resets(netdev
);
1826 if (netdev_vport_is_patch(netdev
)) {
        /* By bailing out here, we don't submit the port to the sFlow module
         * to be considered for counter polling export.  This is correct
         * because the patch port represents an interface that sFlow considers
         * to be "internal" to the switch as a whole, and therefore not a
         * candidate for counter polling. */
1832 port
->odp_port
= OVSP_NONE
;
1836 error
= dpif_port_query_by_name(ofproto
->backer
->dpif
,
1837 netdev_vport_get_dpif_port(netdev
),
1843 port
->odp_port
= dpif_port
.port_no
;
1845 if (netdev_get_tunnel_config(netdev
)) {
1846 port
->tnl_port
= tnl_port_add(&port
->up
, port
->odp_port
);
1848 /* Sanity-check that a mapping doesn't already exist. This
1849 * shouldn't happen for non-tunnel ports. */
1850 if (odp_port_to_ofp_port(ofproto
, port
->odp_port
) != OFPP_NONE
) {
1851 VLOG_ERR("port %s already has an OpenFlow port number",
1853 dpif_port_destroy(&dpif_port
);
1857 hmap_insert(&ofproto
->backer
->odp_to_ofport_map
, &port
->odp_port_node
,
1858 hash_int(port
->odp_port
, 0));
1860 dpif_port_destroy(&dpif_port
);
1862 if (ofproto
->sflow
) {
1863 dpif_sflow_add_port(ofproto
->sflow
, port_
, port
->odp_port
);
static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
    const char *devname = netdev_get_name(port->up.netdev);

    if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destroying the ofproto itself. */
        if (!port->tnl_port) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);
        }
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (port->odp_port != OVSP_NONE && !port->tnl_port) {
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
    }

    tnl_port_del(port->tnl_port);
    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    set_bfd(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}
static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }

    if (port->cfm) {
        cfm_set_netdev(port->cfm, port->up.netdev);
    }
}
static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}
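/* Enables, reconfigures, or disables sFlow sampling on 'ofproto_' according
 * to 'sflow_options', forcing flow revalidation whenever sampling is turned
 * on or off. */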
static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
            }
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}
static int
set_ipfix(
    struct ofproto *ofproto_,
    const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
    const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
    size_t n_flow_exporters_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_ipfix *di = ofproto->ipfix;

    if (bridge_exporter_options || flow_exporters_options) {
        if (!di) {
            di = ofproto->ipfix = dpif_ipfix_create();
        }
        dpif_ipfix_set_options(
            di, bridge_exporter_options, flow_exporters_options,
            n_flow_exporters_options);
    } else {
        if (di) {
            dpif_ipfix_destroy(di);
            ofproto->ipfix = NULL;
        }
    }
    return 0;
}
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error = 0;

    if (s) {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->cfm = cfm_create(ofport->up.netdev);
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}
static bool
get_cfm_status(const struct ofport *ofport_,
               struct ofproto_cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        status->faults = cfm_get_fault(ofport->cfm);
        status->remote_opstate = cfm_get_opup(ofport->cfm);
        status->health = cfm_get_health(ofport->cfm);
        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
        return true;
    } else {
        return false;
    }
}
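/* Applies the BFD configuration in 'cfg' to 'ofport_', revalidating flows if
 * the BFD session actually changed. */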
static int
set_bfd(struct ofport *ofport_, const struct smap *cfg)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct bfd *old;

    old = ofport->bfd;
    ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg);
    if (ofport->bfd != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    return 0;
}
static int
get_bfd_status(struct ofport *ofport_, struct smap *smap)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->bfd) {
        bfd_get_status(ofport->bfd, smap);
        return 0;
    } else {
        return ENOENT;
    }
}
/* Spanning Tree. */
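/* Callback used by the STP module to transmit a BPDU on the ofport associated
 * with STP port number 'port_num'. */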
static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}
/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}
static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}
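/* Refreshes 'ofport''s cached STP state from the STP module, flushing MAC
 * learning and scheduling revalidation when the state change affects learning
 * or forwarding, and updates the OpenFlow port state bits to match. */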
static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml,
                               &ofproto->backer->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
            != stp_forward_in_state(state);

        ofproto->backer->need_revalidate = REV_STP;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static void
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);
}
static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}
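/* Runs periodic STP processing for 'ofproto': advances the STP timers, picks
 * up port state changes, and flushes the MAC learning table when the STP
 * module requests it. */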
static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }
}
static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}
static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}
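/* Installs the QoS queue-to-DSCP mappings in 'qdscp_list' on 'ofport_',
 * rebuilding the port's priority-to-DSCP table and revalidating flows when
 * the mappings change. */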
static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}
/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}
static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}
static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}
static bool
bundle_add_port(struct ofbundle *bundle, uint16_t ofp_port,
                struct lacp_slave_settings *lacp)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    return true;
}
static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}
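/* Creates, reconfigures, or (when 's' is null) destroys the bundle identified
 * by 'aux' on 'ofproto_': updates its member ports, LACP and bond
 * configuration, VLAN mode, and trunk set, flushing MAC learning when
 * anything that affects learning changed. */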
static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    ovs_assert(s->n_slaves == 1 || s->bond != NULL);
    ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    ovs_assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = CONST_CAST(unsigned long *, s->trunks);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = CONST_CAST(unsigned long *, s->trunks);
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}
static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           (void **)&port);
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}
static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}
static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}
/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
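/* Creates, reconfigures, or (when 's' is null) destroys the mirror identified
 * by 'aux', then updates the per-bundle mirror bitmaps and duplicate-mirror
 * masks to match the new configuration. */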
static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml,
                       &ofproto->backer->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}
static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}
static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}
static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    }
    return 0;
}
static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}
static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
static void
set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
                     size_t max_entries)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
    mac_learning_set_max_entries(ofproto->ml, max_entries);
}
static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}
static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
    return port && &ofproto->up == port->up.ofproto ? port : NULL;
}
static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
static struct ofport_dpif *
ofport_get_peer(const struct ofport_dpif *ofport_dpif)
{
    const struct ofproto_dpif *ofproto;
    const char *peer;

    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
    if (!peer) {
        return NULL;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct ofport *ofport;

        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
            return ofport_dpif_cast(ofport);
        }
    }
    return NULL;
}
static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }

    if (ofport->bfd && bfd_should_send_packet(ofport->bfd)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        bfd_put_packet(ofport->bfd, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}
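/* Per-port periodic processing: runs CFM and BFD, tracks carrier changes for
 * LACP, and recomputes whether the port may be enabled, triggering
 * revalidation when that changes. */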
static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);

    if (ofport->tnl_port
        && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
                                &ofport->tnl_port)) {
        ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
    }

    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bfd) {
        bfd_run(ofport->bfd);
        enable = enable && bfd_forwarding(ofport->bfd);
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}
static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }

    if (ofport->bfd) {
        bfd_wait(ofport->bfd);
    }
}
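/* Looks up the port named 'devname' in 'ofproto_' and fills in
 * 'ofproto_port', consulting the ghost port set for ports (such as patch
 * ports) that are not backed by a datapath port of their own. */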
static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    if (sset_contains(&ofproto->ghost_ports, devname)) {
        const char *type = netdev_get_type_from_name(devname);

        /* We may be called before ofproto->up.port_by_name is populated with
         * the appropriate ofport.  For this reason, we must get the name and
         * type from the netdev layer directly. */
        if (type) {
            const struct ofport *ofport;

            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
            ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
            ofproto_port->name = xstrdup(devname);
            ofproto_port->type = xstrdup(type);
            return 0;
        }
        return ENODEV;
    }

    if (!sset_contains(&ofproto->ports, devname)) {
        return ENODEV;
    }
    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
    }
    return error;
}
static int
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
    const char *devname = netdev_get_name(netdev);

    if (netdev_vport_is_patch(netdev)) {
        sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
        return 0;
    }

    if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        uint32_t port_no = UINT32_MAX;
        int error;

        error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
        if (error) {
            return error;
        }
        if (netdev_get_tunnel_config(netdev)) {
            simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
        }
    }

    if (netdev_get_tunnel_config(netdev)) {
        sset_add(&ofproto->ghost_ports, devname);
    } else {
        sset_add(&ofproto->ports, devname);
    }
    return 0;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    int error = 0;

    if (!ofport) {
        return 0;
    }

    sset_find_and_delete(&ofproto->ghost_ports,
                         netdev_get_name(ofport->up.netdev));
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    if (!ofport->tnl_port) {
        error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
        if (!error) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}
static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport_->ofp_port == OFPP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
struct port_dump_state {
    uint32_t bucket;
    uint32_t offset;
    bool ghost;

    struct ofproto_port port;
    bool has_port;
};

static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}
static int
port_dump_next(const struct ofproto *ofproto_, void *state_,
               struct ofproto_port *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state = state_;
    const struct sset *sset;
    struct sset_node *node;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
        state->has_port = false;
    }
    sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
    while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
        int error;

        error = port_query_by_name(ofproto_, node->name, &state->port);
        if (!error) {
            *port = state->port;
            state->has_port = true;
            return 0;
        } else if (error != ENODEV) {
            return error;
        }
    }

    if (!state->ghost) {
        state->ghost = true;
        state->bucket = 0;
        state->offset = 0;
        return port_dump_next(ofproto_, state_, port);
    }

    return EOF;
}
static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
    }
    free(state);
    return 0;
}
*ofproto_
, char **devnamep
)
3486 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3488 if (ofproto
->port_poll_errno
) {
3489 int error
= ofproto
->port_poll_errno
;
3490 ofproto
->port_poll_errno
= 0;
3494 if (sset_is_empty(&ofproto
->port_poll_set
)) {
3498 *devnamep
= sset_pop(&ofproto
->port_poll_set
);
static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}
static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct ofproto_dpif *ofproto;

    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    struct initial_vals initial_vals;
    struct list packets;
    enum dpif_upcall_type upcall_type;
};

struct flow_miss_op {
    struct dpif_op dpif_op;

    uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
    struct xlate_out xout;
    bool xout_garbage;           /* 'xout' needs to be uninitialized? */
};
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
3574 process_special(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
3575 const struct ofport_dpif
*ofport
, const struct ofpbuf
*packet
)
3579 } else if (ofport
->cfm
&& cfm_should_process_flow(ofport
->cfm
, flow
)) {
3581 cfm_process_heartbeat(ofport
->cfm
, packet
);
3584 } else if (ofport
->bfd
&& bfd_should_process_flow(flow
)) {
3586 bfd_process_packet(ofport
->bfd
, flow
, packet
);
3589 } else if (ofport
->bundle
&& ofport
->bundle
->lacp
3590 && flow
->dl_type
== htons(ETH_TYPE_LACP
)) {
3592 lacp_process_packet(ofport
->bundle
->lacp
, ofport
, packet
);
3595 } else if (ofproto
->stp
&& stp_should_process_flow(flow
)) {
3597 stp_process_packet(ofport
, packet
);
3605 static struct flow_miss
*
3606 flow_miss_find(struct hmap
*todo
, const struct ofproto_dpif
*ofproto
,
3607 const struct flow
*flow
, uint32_t hash
)
3609 struct flow_miss
*miss
;
3611 HMAP_FOR_EACH_WITH_HASH (miss
, hmap_node
, hash
, todo
) {
3612 if (miss
->ofproto
== ofproto
&& flow_equal(&miss
->flow
, flow
)) {
/* Partially initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->xout_garbage = false;
    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}
/* Helper for handle_flow_miss_without_facet() and
 * handle_flow_miss_with_facet(). */
static void
handle_flow_miss_common(struct rule_dpif *rule,
                        struct ofpbuf *packet, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in_miss(ofproto, packet, flow);
    }
}
/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
 * installing a datapath flow.  The answer is usually "yes" (a return value of
 * true).  However, for short flows the cost of bookkeeping is much higher than
 * the benefits, so when the datapath holds a large number of flows we impose
 * some heuristics to decide which flows are likely to be worth tracking. */
static bool
flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
                            struct flow_miss *miss, uint32_t hash)
{
    if (!ofproto->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
            return true;
        }

        ofproto->governor = governor_create(ofproto->up.name);
    }

    return governor_should_install_flow(ofproto->governor, hash,
                                        list_size(&miss->packets));
}
/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
 * or creating any datapath flow.  May add an "execute" operation to 'ops' and
 * increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct flow_miss *miss,
                               struct flow_miss_op *ops, size_t *n_ops)
{
    struct rule_dpif *rule = rule_dpif_lookup(miss->ofproto, &miss->flow);
    long long int now = time_msec();
    struct ofpbuf *packet;
    struct xlate_in xin;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;

        COVERAGE_INC(facet_suppress);

        handle_flow_miss_common(rule, packet, &miss->flow);

        dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
        rule_credit_stats(rule, &stats);

        xlate_in_init(&xin, miss->ofproto, &miss->flow, &miss->initial_vals,
                      rule, stats.tcp_flags, packet);
        xin.resubmit_stats = &stats;
        xlate_actions(&xin, &op->xout);

        if (op->xout.odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = op->xout.odp_actions.data;
            execute->actions_len = op->xout.odp_actions.size;
            op->xout_garbage = true;

            (*n_ops)++;
        } else {
            xlate_out_uninit(&op->xout);
        }
    }
}
/* Handles 'miss', which matches 'facet'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op.
 *
 * All of the packets in 'miss' are considered to have arrived at time 'now'.
 * This is really important only for new facets: if we just called time_msec()
 * here, then the new subfacet or its packets could look (occasionally) as
 * though it was used some time after the facet was used.  That can make a
 * one-packet flow look like it has a nonzero duration, which looks odd in
 * e.g. NetFlow statistics. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
                            long long int now,
                            struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path want_path;
    struct subfacet *subfacet;
    struct ofpbuf *packet;

    subfacet = subfacet_create(facet, miss, now);
    want_path = subfacet->facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;

        handle_flow_miss_common(facet->rule, packet, &miss->flow);

        if (want_path != SF_FAST_PATH) {
            struct xlate_in xin;

            xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
                          facet->rule, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }

        dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
        subfacet_update_stats(subfacet, &stats);

        if (facet->xout.odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = facet->xout.odp_actions.data;
            execute->actions_len = facet->xout.odp_actions.size;

            (*n_ops)++;
        }
    }

    if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.u.flow_put;

        subfacet->path = want_path;

        op->xout_garbage = false;
        op->dpif_op.type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        if (want_path == SF_FAST_PATH) {
            put->actions = facet->xout.odp_actions.data;
            put->actions_len = facet->xout.odp_actions.size;
        } else {
            compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
                              op->slow_stub, sizeof op->slow_stub,
                              &put->actions, &put->actions_len);
        }
        put->stats = NULL;
    }
}
/* Handles flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
                 size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct facet *facet;
    long long int now;
    uint32_t hash;

    /* The caller must ensure that miss->hmap_node.hash contains
     * flow_hash(miss->flow, 0). */
    hash = miss->hmap_node.hash;

    facet = facet_lookup_valid(ofproto, &miss->flow, hash);
    if (!facet) {
        /* There does not exist a bijection between 'struct flow' and datapath
         * flow keys with fitness ODP_FIT_TOO_LITTLE.  This breaks a
         * fundamental assumption used throughout the facet and subfacet
         * handling code.  Since we have to handle these misses in userspace
         * anyway, we simply skip facet creation, avoiding the problem
         * altogether. */
        if (miss->key_fitness == ODP_FIT_TOO_LITTLE
            || !flow_miss_should_make_facet(ofproto, miss, hash)) {
            handle_flow_miss_without_facet(miss, ops, n_ops);
            return;
        }

        facet = facet_create(miss, hash);
        now = facet->used;
    } else {
        now = time_msec();
    }
    handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
}
static struct drop_key *
drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
                size_t key_len)
{
    struct drop_key *drop_key;

    HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
                             &backer->drop_keys) {
        if (drop_key->key_len == key_len
            && !memcmp(drop_key->key, key, key_len)) {
            return drop_key;
        }
    }
    return NULL;
}
static void
drop_key_clear(struct dpif_backer *backer)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
    struct drop_key *drop_key, *next;

    HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
        int error;

        error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
                              NULL);
        if (error && !VLOG_DROP_WARN(&rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;
            odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
            VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
                      ds_cstr(&ds));
            ds_destroy(&ds);
        }

        hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
        free(drop_key->key);
        free(drop_key);
    }
}
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
 * to the VLAN TCI with which the packet was really received, that is, the
 * actual VLAN TCI extracted by odp_flow_key_to_flow().  (This differs from
 * the value returned in flow->vlan_tci only for packets received on
 * VLAN splinters.)
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
static int
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
                const struct nlattr *key, size_t key_len,
                struct flow *flow, enum odp_key_fitness *fitnessp,
                struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
                struct initial_vals *initial_vals)
{
    const struct ofport_dpif *port;
    enum odp_key_fitness fitness;
    int error = ENODEV;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (initial_vals) {
        initial_vals->vlan_tci = flow->vlan_tci;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port;
    }

    port = (tnl_port_should_receive(flow)
            ? ofport_dpif_cast(tnl_port_receive(flow))
            : odp_port_to_ofport(backer, flow->in_port));
    flow->in_port = port ? port->up.ofp_port : OFPP_NONE;
    if (!port) {
        goto exit;
    }

    /* XXX: Since the tunnel module is not scoped per backer, for a tunnel port
     * it's theoretically possible that we'll receive an ofport belonging to an
     * entirely different datapath.  In practice, this can't happen because no
     * platform has two separate datapaths which each support tunneling. */
    ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);

    if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of
             * a struct dpif_upcall (see the large comment on that
             * structure definition), so pushing data on 'packet' is in
             * general not a good idea since it could overwrite 'key' or
             * free it as a side effect.  However, it's OK in this special
             * case because we know that 'packet' is inside a Netlink
             * attribute: pushing 4 bytes will just overwrite the 4-byte
             * "struct nlattr", which is fine since we don't need that
             * header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }
        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
    }
    error = 0;

    if (ofproto) {
        *ofproto = ofproto_dpif_cast(port->up.ofproto);
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    return error;
}
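/* Processes a batch of flow miss upcalls: groups the packets by flow into
 * "struct flow_miss"es, installs drop flows for packets from unknown
 * datapath ports, builds the resulting dpif operations, and executes them in
 * one batch. */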
static void
handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss;
    struct flow_miss misses[FLOW_MISS_MAX_BATCH];
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    int n_misses;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    n_misses = 0;
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        struct flow_miss *miss = &misses[n_misses];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        uint32_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = ofproto_receive(backer, upcall->packet, upcall->key,
                                upcall->key_len, &flow, &miss->key_fitness,
                                &ofproto, &odp_in_port, &miss->initial_vals);
        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on datapath port for which we couldn't
             * associate an ofproto.  This can happen if a port is removed
             * while traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the kernel. */
            VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
                         "%"PRIu32, odp_in_port);

            drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
            if (!drop_key) {
                drop_key = xmalloc(sizeof *drop_key);
                drop_key->key = xmemdup(upcall->key, upcall->key_len);
                drop_key->key_len = upcall->key_len;

                hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
                            hash_bytes(drop_key->key, drop_key->key_len, 0));
                dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
                              drop_key->key, drop_key->key_len, NULL, 0, NULL);
            }
            continue;
        }
        if (error) {
            continue;
        }

        ofproto->n_missed++;
        flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
                     &flow.tunnel, flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&todo, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = upcall->key;
            miss->key_len = upcall->key_len;
            miss->upcall_type = upcall->type;
            list_init(&miss->packets);

            n_misses++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &todo) {
        handle_flow_miss(miss, flow_miss_ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(backer->dpif, dpif_ops, n_ops);

    for (i = 0; i < n_ops; i++) {
        if (flow_miss_ops[i].xout_garbage) {
            xlate_out_uninit(&flow_miss_ops[i].xout);
        }
    }
    hmap_destroy(&todo);
}
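/* Examines 'upcall' and returns which kind of upcall it is, inspecting the
 * user action cookie to distinguish sFlow, slow-path, flow-sample, and IPFIX
 * "action" upcalls from genuine flow misses. */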
static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL, FLOW_SAMPLE_UPCALL,
              IPFIX_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
    size_t userdata_len;
    union user_action_cookie cookie;

    /* First look at the upcall type. */
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(upcall->userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), userdata_len);
    if (userdata_len == sizeof cookie.sflow
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == sizeof cookie.slow_path
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == sizeof cookie.flow_sample
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == sizeof cookie.ipfix
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %zu", cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}
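/* Added note: a cookie is matched on both its length and its 'type' field, so
 * a truncated or oversized cookie cannot be mistaken for a valid one.  The
 * caller only needs a switch on the classification, as handle_upcalls() below
 * does; slow-path cookies deliberately fall back to MISS_UPCALL so that their
 * packets go through the ordinary flow-miss machinery. */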
static void
handle_sflow_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;
    uint32_t odp_in_port;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, &odp_in_port, NULL)
        || !ofproto->sflow) {
        return;
    }

    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.sflow);
    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
                        odp_in_port, &cookie);
}
static void
handle_flow_sample_upcall(struct dpif_backer *backer,
                          const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, NULL, NULL)
        || !ofproto->ipfix) {
        return;
    }

    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.flow_sample);

    /* The flow reflects exactly the contents of the packet.  Sample
     * the packet using it. */
    dpif_ipfix_flow_sample(ofproto->ipfix, upcall->packet, &flow,
                           cookie.flow_sample.collector_set_id,
                           cookie.flow_sample.probability,
                           cookie.flow_sample.obs_domain_id,
                           cookie.flow_sample.obs_point_id);
}
static void
handle_ipfix_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    struct flow flow;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, NULL, NULL)
        || !ofproto->ipfix) {
        return;
    }

    /* The flow reflects exactly the contents of the packet.  Sample
     * the packet using it. */
    dpif_ipfix_bridge_sample(ofproto->ipfix, upcall->packet, &flow);
}
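/* Added note: the two IPFIX entry points differ only in granularity.
 * handle_flow_sample_upcall() reports one sampled packet together with the
 * collector set, observation domain and observation point carried in the
 * flow_sample cookie, while handle_ipfix_upcall() feeds the packet to the
 * bridge-wide exporter without per-flow parameters. */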
static int
handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
    uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
    int n_processed;
    int n_misses;
    int i;

    ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (n_processed = 0; n_processed < max_batch; n_processed++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        struct ofpbuf *buf = &miss_bufs[n_misses];
        int error;

        ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
                        sizeof miss_buf_stubs[n_misses]);
        error = dpif_recv(backer->dpif, upcall, buf);
        if (error) {
            ofpbuf_uninit(buf);
            break;
        }

        switch (classify_upcall(upcall)) {
        case MISS_UPCALL:
            /* Handle it later. */
            n_misses++;
            break;

        case SFLOW_UPCALL:
            handle_sflow_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case FLOW_SAMPLE_UPCALL:
            handle_flow_sample_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case IPFIX_UPCALL:
            handle_ipfix_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case BAD_UPCALL:
            ofpbuf_uninit(buf);
            break;
        }
    }

    /* Handle deferred MISS_UPCALL processing. */
    handle_miss_upcalls(backer, misses, n_misses);
    for (i = 0; i < n_misses; i++) {
        ofpbuf_uninit(&miss_bufs[i]);
    }

    return n_processed;
}
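/* Sketch (added, not in the original): a caller would typically drain upcalls
 * in bounded batches, along the lines of
 *
 *     while (handle_upcalls(backer, FLOW_MISS_MAX_BATCH) > 0) {
 *         ...
 *     }
 *
 * Since the function returns the number of upcalls it actually pulled from
 * dpif_recv(), a return value smaller than the requested batch means the
 * datapath queue is (momentarily) empty. */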
/* Flow expiration. */

static int subfacet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);

/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    int max_idle = INT32_MAX;

    /* Periodically clear out the drop keys in an effort to keep them
     * relatively few. */
    drop_key_clear(backer);

    /* Update stats for each flow in the backer. */
    update_stats(backer);

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct rule *rule, *next_rule;
        int dp_max_idle;

        if (ofproto->backer != backer) {
            continue;
        }

        /* Keep track of the max number of flows per ofproto_dpif. */
        update_max_subfacet_count(ofproto);

        /* Expire subfacets that have been idle too long. */
        dp_max_idle = subfacet_max_idle(ofproto);
        expire_subfacets(ofproto, dp_max_idle);

        max_idle = MIN(max_idle, dp_max_idle);

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule));
        }

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (bundle->bond) {
                    bond_rebalance(bundle->bond, &backer->revalidate_set);
                }
            }
        }
    }

    return MIN(max_idle, 1000);
}
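/* Worked example (added): if one bridge's histogram yields a dp_max_idle of
 * 1500 ms and no other bridge reports less, expire() returns
 * MIN(1500, 1000) == 1000, so the caller polls again within a second; with a
 * small dp_max_idle such as 200 ms it returns 200, keeping idle subfacets
 * from outliving the computed cutoff by much. */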
/* Updates flow table statistics given that the datapath just reported 'stats'
 * as 'subfacet''s statistics. */
static void
update_subfacet_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct dpif_flow_stats diff;

    diff.tcp_flags = stats->tcp_flags;
    diff.used = stats->used;

    if (stats->n_packets >= subfacet->dp_packet_count) {
        diff.n_packets = stats->n_packets - subfacet->dp_packet_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
        diff.n_packets = 0;
    }

    if (stats->n_bytes >= subfacet->dp_byte_count) {
        diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
        diff.n_bytes = 0;
    }

    ofproto->n_hit += diff.n_packets;
    subfacet->dp_packet_count = stats->n_packets;
    subfacet->dp_byte_count = stats->n_bytes;
    subfacet_update_stats(subfacet, &diff);

    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }
}
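/* Worked example (added): if the datapath reports n_packets == 15 and
 * n_bytes == 9000 for a subfacet whose dp_packet_count and dp_byte_count were
 * 10 and 6000, the computed diff is 5 packets and 3000 bytes; the diff is
 * folded into the facet by subfacet_update_stats() and the dp_* counters are
 * reset to the datapath's absolute values so the next dump yields fresh
 * deltas.  A report that goes backwards (e.g. after a datapath counter reset)
 * is logged as unexpected rather than folded in as a negative delta. */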
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
 * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
static void
delete_unexpected_flow(struct ofproto_dpif *ofproto,
                       const struct nlattr *key, size_t key_len)
{
    if (!VLOG_DROP_WARN(&rl)) {
        struct ds s;

        ds_init(&s);
        odp_flow_key_format(key, key_len, &s);
        VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    COVERAGE_INC(facet_unexpected);
    dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics.
 *
 * In addition, this function maintains per ofproto flow hit counts.  The patch
 * port is not treated specially.  e.g. A packet ingress from br0 patched into
 * br1 will increase the hit count of br0 by 1, but it does not affect
 * the hit or miss counts of br1. */
static void
update_stats(struct dpif_backer *backer)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    struct ofproto_dpif *ofproto;
    size_t key_len;

    dpif_flow_dump_start(&dump, backer->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct flow flow;
        struct subfacet *subfacet;
        uint32_t key_hash;

        if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
                            NULL, NULL)) {
            continue;
        }

        ofproto->total_subfacet_count += hmap_count(&ofproto->subfacets);
        ofproto->n_update_stats++;

        key_hash = odp_flow_key_hash(key, key_len);
        subfacet = subfacet_find(ofproto, key, key_len, key_hash);
        switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
        case SF_FAST_PATH:
            update_subfacet_stats(subfacet, stats);
            break;

        case SF_SLOW_PATH:
            /* Stats are updated per-packet. */
            break;

        case SF_NOT_INSTALLED:
        default:
            delete_unexpected_flow(ofproto, key, key_len);
            break;
        }
    }
    dpif_flow_dump_done(&dump);

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        update_moving_averages(ofproto);
    }
}
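/* Added example for the patch-port note above: a packet that ingresses on br0
 * and is patched into br1 is accounted against the datapath flow belonging to
 * br0, so only br0's n_hit is incremented here; br1 has no datapath flow of
 * its own for that packet and its hit/miss counters stay untouched. */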
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistics into its rule. */
static int
subfacet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At this point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->subfacets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
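/* Worked example (added), assuming TIME_UPDATE_INTERVAL divides 100 so that
 * BUCKET_WIDTH == 100 ms and N_BUCKETS == 50: with 10000 installed subfacets
 * and flow_eviction_threshold == 1000, the do/while loop accumulates buckets
 * starting from the most recently used until subtotal reaches
 * MAX(1000, 10000 / 100) == 1000, and anything idler than the returned
 * 'bucket * BUCKET_WIDTH' milliseconds is expired on this run.  With only 800
 * subfacets the early return keeps everything, answering the full
 * 50 * 100 == 5000 ms. */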
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
    /* Cutoff time for most flows. */
    long long int normal_cutoff = time_msec() - dp_max_idle;

    /* We really want to keep flows for special protocols around, so use a more
     * conservative cutoff. */
    long long int special_cutoff = time_msec() - 10000;

    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        long long int cutoff;

        cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
                                                | SLOW_STP)
                  ? special_cutoff
                  : normal_cutoff);
        if (subfacet->used < cutoff) {
            if (subfacet->path != SF_NOT_INSTALLED) {
                batch[n_batch++] = subfacet;
                if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                    subfacet_destroy_batch(ofproto, batch, n_batch);
                    n_batch = 0;
                }
            } else {
                subfacet_destroy(subfacet);
            }
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(ofproto, batch, n_batch);
    }
}
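/* Added example: with dp_max_idle == 2000 ms an ordinary subfacet is dropped
 * after two seconds of inactivity, while a subfacet whose facet is slow-pathed
 * for CFM, BFD, LACP or STP survives until the fixed 10000 ms special cutoff.
 * Installed subfacets are queued into 'batch' and deleted through
 * subfacet_destroy_batch() at most SUBFACET_DESTROY_MAX_BATCH at a time, so
 * the datapath sees a few large delete transactions instead of many small
 * ones. */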
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * then deletes it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    if (rule->up.pending) {
        /* We'll have to expire it later. */
        return;
    }

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout
               && now > rule->up.used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
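/* Added example of the timeout arithmetic above: a rule with
 * hard_timeout == 30 expires once time_msec() exceeds
 * rule->up.modified + 30 * 1000 regardless of traffic and is reported as
 * OFPRR_HARD_TIMEOUT, whereas a rule with idle_timeout == 10 expires only
 * when rule->up.used, the last time a packet hit it, lies more than
 * 10 * 1000 ms in the past, and is reported as OFPRR_IDLE_TIMEOUT. */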
4639 /* Creates and returns a new facet based on 'miss'.
4641 * The caller must already have determined that no facet with an identical
4642 * 'miss->flow' exists in 'miss->ofproto'.
4644 * 'hash' must be the return value of flow_hash(miss->flow, 0).
4646 * The facet will initially have no subfacets. The caller should create (at
4647 * least) one subfacet with subfacet_create(). */
4648 static struct facet
*
4649 facet_create(const struct flow_miss
*miss
, uint32_t hash
)
4651 struct ofproto_dpif
*ofproto
= miss
->ofproto
;
4652 struct xlate_in xin
;
4653 struct facet
*facet
;
4655 facet
= xzalloc(sizeof *facet
);
4656 facet
->used
= time_msec();
4657 facet
->flow
= miss
->flow
;
4658 facet
->initial_vals
= miss
->initial_vals
;
4659 facet
->rule
= rule_dpif_lookup(ofproto
, &facet
->flow
);
4660 facet
->learn_rl
= time_msec() + 500;
4662 hmap_insert(&ofproto
->facets
, &facet
->hmap_node
, hash
);
4663 list_push_back(&facet
->rule
->facets
, &facet
->list_node
);
4664 list_init(&facet
->subfacets
);
4665 netflow_flow_init(&facet
->nf_flow
);
4666 netflow_flow_update_time(ofproto
->netflow
, &facet
->nf_flow
, facet
->used
);
4668 xlate_in_init(&xin
, ofproto
, &facet
->flow
, &facet
->initial_vals
,
4669 facet
->rule
, 0, NULL
);
4670 xin
.may_learn
= true;
4671 xlate_actions(&xin
, &facet
->xout
);
4672 facet
->nf_flow
.output_iface
= facet
->xout
.nf_output_iface
;
4678 facet_free(struct facet
*facet
)
4681 xlate_out_uninit(&facet
->xout
);
4686 /* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
4687 * 'packet', which arrived on 'in_port'. */
4689 execute_odp_actions(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
4690 const struct nlattr
*odp_actions
, size_t actions_len
,
4691 struct ofpbuf
*packet
)
4693 struct odputil_keybuf keybuf
;
4697 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
4698 odp_flow_key_from_flow(&key
, flow
,
4699 ofp_port_to_odp_port(ofproto
, flow
->in_port
));
4701 error
= dpif_execute(ofproto
->backer
->dpif
, key
.data
, key
.size
,
4702 odp_actions
, actions_len
, packet
);
4706 /* Remove 'facet' from 'ofproto' and free up the associated memory:
4708 * - If 'facet' was installed in the datapath, uninstalls it and updates its
4709 * rule's statistics, via subfacet_uninstall().
4711 * - Removes 'facet' from its rule and from ofproto->facets.
4714 facet_remove(struct facet
*facet
)
4716 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4717 struct subfacet
*subfacet
, *next_subfacet
;
4719 ovs_assert(!list_is_empty(&facet
->subfacets
));
4721 /* First uninstall all of the subfacets to get final statistics. */
4722 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4723 subfacet_uninstall(subfacet
);
4726 /* Flush the final stats to the rule.
4728 * This might require us to have at least one subfacet around so that we
4729 * can use its actions for accounting in facet_account(), which is why we
4730 * have uninstalled but not yet destroyed the subfacets. */
4731 facet_flush_stats(facet
);
4733 /* Now we're really all done so destroy everything. */
4734 LIST_FOR_EACH_SAFE (subfacet
, next_subfacet
, list_node
,
4735 &facet
->subfacets
) {
4736 subfacet_destroy__(subfacet
);
4738 hmap_remove(&ofproto
->facets
, &facet
->hmap_node
);
4739 list_remove(&facet
->list_node
);
4743 /* Feed information from 'facet' back into the learning table to keep it in
4744 * sync with what is actually flowing through the datapath. */
4746 facet_learn(struct facet
*facet
)
4748 long long int now
= time_msec();
4750 if (!facet
->xout
.has_fin_timeout
&& now
< facet
->learn_rl
) {
4754 facet
->learn_rl
= now
+ 500;
4756 if (!facet
->xout
.has_learn
4757 && !facet
->xout
.has_normal
4758 && (!facet
->xout
.has_fin_timeout
4759 || !(facet
->tcp_flags
& (TCP_FIN
| TCP_RST
)))) {
4763 facet_push_stats(facet
, true);
4767 facet_account(struct facet
*facet
)
4769 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4770 const struct nlattr
*a
;
4775 if (!facet
->xout
.has_normal
|| !ofproto
->has_bonded_bundles
) {
4778 n_bytes
= facet
->byte_count
- facet
->accounted_bytes
;
4780 /* This loop feeds byte counters to bond_account() for rebalancing to use
4781 * as a basis. We also need to track the actual VLAN on which the packet
4782 * is going to be sent to ensure that it matches the one passed to
4783 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
4786 * We use the actions from an arbitrary subfacet because they should all
4787 * be equally valid for our purpose. */
4788 vlan_tci
= facet
->flow
.vlan_tci
;
4789 NL_ATTR_FOR_EACH_UNSAFE (a
, left
, facet
->xout
.odp_actions
.data
,
4790 facet
->xout
.odp_actions
.size
) {
4791 const struct ovs_action_push_vlan
*vlan
;
4792 struct ofport_dpif
*port
;
4794 switch (nl_attr_type(a
)) {
4795 case OVS_ACTION_ATTR_OUTPUT
:
4796 port
= get_odp_port(ofproto
, nl_attr_get_u32(a
));
4797 if (port
&& port
->bundle
&& port
->bundle
->bond
) {
4798 bond_account(port
->bundle
->bond
, &facet
->flow
,
4799 vlan_tci_to_vid(vlan_tci
), n_bytes
);
4803 case OVS_ACTION_ATTR_POP_VLAN
:
4804 vlan_tci
= htons(0);
4807 case OVS_ACTION_ATTR_PUSH_VLAN
:
4808 vlan
= nl_attr_get(a
);
4809 vlan_tci
= vlan
->vlan_tci
;
4815 /* Returns true if the only action for 'facet' is to send to the controller.
4816 * (We don't report NetFlow expiration messages for such facets because they
4817 * are just part of the control logic for the network, not real traffic). */
4819 facet_is_controller_flow(struct facet
*facet
)
4822 const struct rule
*rule
= &facet
->rule
->up
;
4823 const struct ofpact
*ofpacts
= rule
->ofpacts
;
4824 size_t ofpacts_len
= rule
->ofpacts_len
;
4826 if (ofpacts_len
> 0 &&
4827 ofpacts
->type
== OFPACT_CONTROLLER
&&
4828 ofpact_next(ofpacts
) >= ofpact_end(ofpacts
, ofpacts_len
)) {
4835 /* Folds all of 'facet''s statistics into its rule. Also updates the
4836 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
4837 * 'facet''s statistics in the datapath should have been zeroed and folded into
4838 * its packet and byte counts before this function is called. */
4840 facet_flush_stats(struct facet
*facet
)
4842 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4843 struct subfacet
*subfacet
;
4845 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
4846 ovs_assert(!subfacet
->dp_byte_count
);
4847 ovs_assert(!subfacet
->dp_packet_count
);
4850 facet_push_stats(facet
, false);
4851 if (facet
->accounted_bytes
< facet
->byte_count
) {
4852 facet_account(facet
);
4853 facet
->accounted_bytes
= facet
->byte_count
;
4856 if (ofproto
->netflow
&& !facet_is_controller_flow(facet
)) {
4857 struct ofexpired expired
;
4858 expired
.flow
= facet
->flow
;
4859 expired
.packet_count
= facet
->packet_count
;
4860 expired
.byte_count
= facet
->byte_count
;
4861 expired
.used
= facet
->used
;
4862 netflow_expire(ofproto
->netflow
, &facet
->nf_flow
, &expired
);
4865 /* Reset counters to prevent double counting if 'facet' ever gets
4867 facet_reset_counters(facet
);
4869 netflow_flow_clear(&facet
->nf_flow
);
4870 facet
->tcp_flags
= 0;
4873 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4874 * Returns it if found, otherwise a null pointer.
4876 * 'hash' must be the return value of flow_hash(flow, 0).
4878 * The returned facet might need revalidation; use facet_lookup_valid()
4879 * instead if that is important. */
4880 static struct facet
*
4881 facet_find(struct ofproto_dpif
*ofproto
,
4882 const struct flow
*flow
, uint32_t hash
)
4884 struct facet
*facet
;
4886 HMAP_FOR_EACH_WITH_HASH (facet
, hmap_node
, hash
, &ofproto
->facets
) {
4887 if (flow_equal(flow
, &facet
->flow
)) {
4895 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4896 * Returns it if found, otherwise a null pointer.
4898 * 'hash' must be the return value of flow_hash(flow, 0).
4900 * The returned facet is guaranteed to be valid. */
4901 static struct facet
*
4902 facet_lookup_valid(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
4905 struct facet
*facet
;
4907 facet
= facet_find(ofproto
, flow
, hash
);
4909 && (ofproto
->backer
->need_revalidate
4910 || tag_set_intersects(&ofproto
->backer
->revalidate_set
,
4912 && !facet_revalidate(facet
)) {
4920 facet_check_consistency(struct facet
*facet
)
4922 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 15);
4924 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
4926 struct xlate_out xout
;
4927 struct xlate_in xin
;
4929 struct rule_dpif
*rule
;
4932 /* Check the rule for consistency. */
4933 rule
= rule_dpif_lookup(ofproto
, &facet
->flow
);
4934 if (rule
!= facet
->rule
) {
4935 if (!VLOG_DROP_WARN(&rl
)) {
4936 struct ds s
= DS_EMPTY_INITIALIZER
;
4938 flow_format(&s
, &facet
->flow
);
4939 ds_put_format(&s
, ": facet associated with wrong rule (was "
4940 "table=%"PRIu8
",", facet
->rule
->up
.table_id
);
4941 cls_rule_format(&facet
->rule
->up
.cr
, &s
);
4942 ds_put_format(&s
, ") (should have been table=%"PRIu8
",",
4944 cls_rule_format(&rule
->up
.cr
, &s
);
4945 ds_put_cstr(&s
, ")\n");
4952 /* Check the datapath actions for consistency. */
4953 xlate_in_init(&xin
, ofproto
, &facet
->flow
, &facet
->initial_vals
, rule
,
4955 xlate_actions(&xin
, &xout
);
4957 ok
= ofpbuf_equal(&facet
->xout
.odp_actions
, &xout
.odp_actions
)
4958 && facet
->xout
.slow
== xout
.slow
;
4959 if (!ok
&& !VLOG_DROP_WARN(&rl
)) {
4960 struct ds s
= DS_EMPTY_INITIALIZER
;
4962 flow_format(&s
, &facet
->flow
);
4963 ds_put_cstr(&s
, ": inconsistency in facet");
4965 if (!ofpbuf_equal(&facet
->xout
.odp_actions
, &xout
.odp_actions
)) {
4966 ds_put_cstr(&s
, " (actions were: ");
4967 format_odp_actions(&s
, facet
->xout
.odp_actions
.data
,
4968 facet
->xout
.odp_actions
.size
);
4969 ds_put_cstr(&s
, ") (correct actions: ");
4970 format_odp_actions(&s
, xout
.odp_actions
.data
,
4971 xout
.odp_actions
.size
);
4972 ds_put_cstr(&s
, ")");
4975 if (facet
->xout
.slow
!= xout
.slow
) {
4976 ds_put_format(&s
, " slow path incorrect. should be %d", xout
.slow
);
4981 xlate_out_uninit(&xout
);
4986 /* Re-searches the classifier for 'facet':
4988 * - If the rule found is different from 'facet''s current rule, moves
4989 * 'facet' to the new rule and recompiles its actions.
4991 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
4992 * where it is and recompiles its actions anyway.
4994 * - If any of 'facet''s subfacets correspond to a new flow according to
4995 * ofproto_receive(), 'facet' is removed.
4997 * Returns true if 'facet' is still valid. False if 'facet' was removed. */
4999 facet_revalidate(struct facet
*facet
)
5001 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5002 struct rule_dpif
*new_rule
;
5003 struct subfacet
*subfacet
;
5004 struct xlate_out xout
;
5005 struct xlate_in xin
;
5007 COVERAGE_INC(facet_revalidate
);
5009 /* Check that child subfacets still correspond to this facet. Tunnel
5010 * configuration changes could cause a subfacet's OpenFlow in_port to
5012 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
5013 struct ofproto_dpif
*recv_ofproto
;
5014 struct flow recv_flow
;
5017 error
= ofproto_receive(ofproto
->backer
, NULL
, subfacet
->key
,
5018 subfacet
->key_len
, &recv_flow
, NULL
,
5019 &recv_ofproto
, NULL
, NULL
);
5021 || recv_ofproto
!= ofproto
5022 || memcmp(&recv_flow
, &facet
->flow
, sizeof recv_flow
)) {
5023 facet_remove(facet
);
5028 new_rule
= rule_dpif_lookup(ofproto
, &facet
->flow
);
5030 /* Calculate new datapath actions.
5032 * We do not modify any 'facet' state yet, because we might need to, e.g.,
5033 * emit a NetFlow expiration and, if so, we need to have the old state
5034 * around to properly compose it. */
5035 xlate_in_init(&xin
, ofproto
, &facet
->flow
, &facet
->initial_vals
, new_rule
,
5037 xlate_actions(&xin
, &xout
);
5039 /* A facet's slow path reason should only change under dramatic
5040 * circumstances. Rather than try to update everything, it's simpler to
5041 * remove the facet and start over. */
5042 if (facet
->xout
.slow
!= xout
.slow
) {
5043 facet_remove(facet
);
5044 xlate_out_uninit(&xout
);
5048 if (!ofpbuf_equal(&facet
->xout
.odp_actions
, &xout
.odp_actions
)) {
5049 LIST_FOR_EACH(subfacet
, list_node
, &facet
->subfacets
) {
5050 if (subfacet
->path
== SF_FAST_PATH
) {
5051 struct dpif_flow_stats stats
;
5053 subfacet_install(subfacet
, &xout
.odp_actions
, &stats
);
5054 subfacet_update_stats(subfacet
, &stats
);
5058 facet_flush_stats(facet
);
5060 ofpbuf_clear(&facet
->xout
.odp_actions
);
5061 ofpbuf_put(&facet
->xout
.odp_actions
, xout
.odp_actions
.data
,
5062 xout
.odp_actions
.size
);
5065 /* Update 'facet' now that we've taken care of all the old state. */
5066 facet
->xout
.tags
= xout
.tags
;
5067 facet
->xout
.slow
= xout
.slow
;
5068 facet
->xout
.has_learn
= xout
.has_learn
;
5069 facet
->xout
.has_normal
= xout
.has_normal
;
5070 facet
->xout
.has_fin_timeout
= xout
.has_fin_timeout
;
5071 facet
->xout
.nf_output_iface
= xout
.nf_output_iface
;
5072 facet
->xout
.mirrors
= xout
.mirrors
;
5073 facet
->nf_flow
.output_iface
= facet
->xout
.nf_output_iface
;
5075 if (facet
->rule
!= new_rule
) {
5076 COVERAGE_INC(facet_changed_rule
);
5077 list_remove(&facet
->list_node
);
5078 list_push_back(&new_rule
->facets
, &facet
->list_node
);
5079 facet
->rule
= new_rule
;
5080 facet
->used
= new_rule
->up
.created
;
5081 facet
->prev_used
= facet
->used
;
5084 xlate_out_uninit(&xout
);
5089 facet_reset_counters(struct facet
*facet
)
5091 facet
->packet_count
= 0;
5092 facet
->byte_count
= 0;
5093 facet
->prev_packet_count
= 0;
5094 facet
->prev_byte_count
= 0;
5095 facet
->accounted_bytes
= 0;
5099 facet_push_stats(struct facet
*facet
, bool may_learn
)
5101 struct dpif_flow_stats stats
;
5103 ovs_assert(facet
->packet_count
>= facet
->prev_packet_count
);
5104 ovs_assert(facet
->byte_count
>= facet
->prev_byte_count
);
5105 ovs_assert(facet
->used
>= facet
->prev_used
);
5107 stats
.n_packets
= facet
->packet_count
- facet
->prev_packet_count
;
5108 stats
.n_bytes
= facet
->byte_count
- facet
->prev_byte_count
;
5109 stats
.used
= facet
->used
;
5110 stats
.tcp_flags
= facet
->tcp_flags
;
5112 if (may_learn
|| stats
.n_packets
|| facet
->used
> facet
->prev_used
) {
5113 struct ofproto_dpif
*ofproto
=
5114 ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5116 struct ofport_dpif
*in_port
;
5117 struct xlate_in xin
;
5119 facet
->prev_packet_count
= facet
->packet_count
;
5120 facet
->prev_byte_count
= facet
->byte_count
;
5121 facet
->prev_used
= facet
->used
;
5123 in_port
= get_ofp_port(ofproto
, facet
->flow
.in_port
);
5124 if (in_port
&& in_port
->tnl_port
) {
5125 netdev_vport_inc_rx(in_port
->up
.netdev
, &stats
);
5128 rule_credit_stats(facet
->rule
, &stats
);
5129 netflow_flow_update_time(ofproto
->netflow
, &facet
->nf_flow
,
5131 netflow_flow_update_flags(&facet
->nf_flow
, facet
->tcp_flags
);
5132 update_mirror_stats(ofproto
, facet
->xout
.mirrors
, stats
.n_packets
,
5135 xlate_in_init(&xin
, ofproto
, &facet
->flow
, &facet
->initial_vals
,
5136 facet
->rule
, stats
.tcp_flags
, NULL
);
5137 xin
.resubmit_stats
= &stats
;
5138 xin
.may_learn
= may_learn
;
5139 xlate_actions_for_side_effects(&xin
);
5144 push_all_stats__(bool run_fast
)
5146 static long long int rl
= LLONG_MIN
;
5147 struct ofproto_dpif
*ofproto
;
5149 if (time_msec() < rl
) {
5153 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
5154 struct facet
*facet
;
5156 HMAP_FOR_EACH (facet
, hmap_node
, &ofproto
->facets
) {
5157 facet_push_stats(facet
, false);
5164 rl
= time_msec() + 100;
5168 push_all_stats(void)
5170 push_all_stats__(true);
5174 rule_credit_stats(struct rule_dpif
*rule
, const struct dpif_flow_stats
*stats
)
5176 rule
->packet_count
+= stats
->n_packets
;
5177 rule
->byte_count
+= stats
->n_bytes
;
5178 ofproto_rule_update_used(&rule
->up
, stats
->used
);
5183 static struct subfacet
*
5184 subfacet_find(struct ofproto_dpif
*ofproto
,
5185 const struct nlattr
*key
, size_t key_len
, uint32_t key_hash
)
5187 struct subfacet
*subfacet
;
5189 HMAP_FOR_EACH_WITH_HASH (subfacet
, hmap_node
, key_hash
,
5190 &ofproto
->subfacets
) {
5191 if (subfacet
->key_len
== key_len
5192 && !memcmp(key
, subfacet
->key
, key_len
)) {
5200 /* Searches 'facet' (within 'ofproto') for a subfacet with the specified
5201 * 'key_fitness', 'key', and 'key_len' members in 'miss'. Returns the
5202 * existing subfacet if there is one, otherwise creates and returns a
5204 static struct subfacet
*
5205 subfacet_create(struct facet
*facet
, struct flow_miss
*miss
,
5208 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5209 enum odp_key_fitness key_fitness
= miss
->key_fitness
;
5210 const struct nlattr
*key
= miss
->key
;
5211 size_t key_len
= miss
->key_len
;
5213 struct subfacet
*subfacet
;
5215 key_hash
= odp_flow_key_hash(key
, key_len
);
5217 if (list_is_empty(&facet
->subfacets
)) {
5218 subfacet
= &facet
->one_subfacet
;
5220 subfacet
= subfacet_find(ofproto
, key
, key_len
, key_hash
);
5222 if (subfacet
->facet
== facet
) {
5226 /* This shouldn't happen. */
5227 VLOG_ERR_RL(&rl
, "subfacet with wrong facet");
5228 subfacet_destroy(subfacet
);
5231 subfacet
= xmalloc(sizeof *subfacet
);
5234 hmap_insert(&ofproto
->subfacets
, &subfacet
->hmap_node
, key_hash
);
5235 list_push_back(&facet
->subfacets
, &subfacet
->list_node
);
5236 subfacet
->facet
= facet
;
5237 subfacet
->key_fitness
= key_fitness
;
5238 subfacet
->key
= xmemdup(key
, key_len
);
5239 subfacet
->key_len
= key_len
;
5240 subfacet
->used
= now
;
5241 subfacet
->created
= now
;
5242 subfacet
->dp_packet_count
= 0;
5243 subfacet
->dp_byte_count
= 0;
5244 subfacet
->path
= SF_NOT_INSTALLED
;
5246 ofproto
->subfacet_add_count
++;
5250 /* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
5251 * its facet within 'ofproto', and frees it. */
5253 subfacet_destroy__(struct subfacet
*subfacet
)
5255 struct facet
*facet
= subfacet
->facet
;
5256 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5258 /* Update ofproto stats before uninstall the subfacet. */
5259 ofproto
->subfacet_del_count
++;
5260 ofproto
->total_subfacet_life_span
+= (time_msec() - subfacet
->created
);
5262 subfacet_uninstall(subfacet
);
5263 hmap_remove(&ofproto
->subfacets
, &subfacet
->hmap_node
);
5264 list_remove(&subfacet
->list_node
);
5265 free(subfacet
->key
);
5266 if (subfacet
!= &facet
->one_subfacet
) {
5271 /* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
5272 * last remaining subfacet in its facet destroys the facet too. */
5274 subfacet_destroy(struct subfacet
*subfacet
)
5276 struct facet
*facet
= subfacet
->facet
;
5278 if (list_is_singleton(&facet
->subfacets
)) {
5279 /* facet_remove() needs at least one subfacet (it will remove it). */
5280 facet_remove(facet
);
5282 subfacet_destroy__(subfacet
);
5287 subfacet_destroy_batch(struct ofproto_dpif
*ofproto
,
5288 struct subfacet
**subfacets
, int n
)
5290 struct dpif_op ops
[SUBFACET_DESTROY_MAX_BATCH
];
5291 struct dpif_op
*opsp
[SUBFACET_DESTROY_MAX_BATCH
];
5292 struct dpif_flow_stats stats
[SUBFACET_DESTROY_MAX_BATCH
];
5295 for (i
= 0; i
< n
; i
++) {
5296 ops
[i
].type
= DPIF_OP_FLOW_DEL
;
5297 ops
[i
].u
.flow_del
.key
= subfacets
[i
]->key
;
5298 ops
[i
].u
.flow_del
.key_len
= subfacets
[i
]->key_len
;
5299 ops
[i
].u
.flow_del
.stats
= &stats
[i
];
5303 dpif_operate(ofproto
->backer
->dpif
, opsp
, n
);
5304 for (i
= 0; i
< n
; i
++) {
5305 subfacet_reset_dp_stats(subfacets
[i
], &stats
[i
]);
5306 subfacets
[i
]->path
= SF_NOT_INSTALLED
;
5307 subfacet_destroy(subfacets
[i
]);
5312 /* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
5313 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
5314 * in the datapath will be zeroed and 'stats' will be updated with traffic new
5315 * since 'subfacet' was last updated.
5317 * Returns 0 if successful, otherwise a positive errno value. */
5319 subfacet_install(struct subfacet
*subfacet
, const struct ofpbuf
*odp_actions
,
5320 struct dpif_flow_stats
*stats
)
5322 struct facet
*facet
= subfacet
->facet
;
5323 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(facet
->rule
->up
.ofproto
);
5324 enum subfacet_path path
= facet
->xout
.slow
? SF_SLOW_PATH
: SF_FAST_PATH
;
5325 const struct nlattr
*actions
= odp_actions
->data
;
5326 size_t actions_len
= odp_actions
->size
;
5328 uint64_t slow_path_stub
[128 / 8];
5329 enum dpif_flow_put_flags flags
;
5332 flags
= DPIF_FP_CREATE
| DPIF_FP_MODIFY
;
5334 flags
|= DPIF_FP_ZERO_STATS
;
5337 if (path
== SF_SLOW_PATH
) {
5338 compose_slow_path(ofproto
, &facet
->flow
, facet
->xout
.slow
,
5339 slow_path_stub
, sizeof slow_path_stub
,
5340 &actions
, &actions_len
);
5343 ret
= dpif_flow_put(ofproto
->backer
->dpif
, flags
, subfacet
->key
,
5344 subfacet
->key_len
, actions
, actions_len
, stats
);
5347 subfacet_reset_dp_stats(subfacet
, stats
);
5351 subfacet
->path
= path
;
5356 /* If 'subfacet' is installed in the datapath, uninstalls it. */
5358 subfacet_uninstall(struct subfacet
*subfacet
)
5360 if (subfacet
->path
!= SF_NOT_INSTALLED
) {
5361 struct rule_dpif
*rule
= subfacet
->facet
->rule
;
5362 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5363 struct dpif_flow_stats stats
;
5366 error
= dpif_flow_del(ofproto
->backer
->dpif
, subfacet
->key
,
5367 subfacet
->key_len
, &stats
);
5368 subfacet_reset_dp_stats(subfacet
, &stats
);
5370 subfacet_update_stats(subfacet
, &stats
);
5372 subfacet
->path
= SF_NOT_INSTALLED
;
5374 ovs_assert(subfacet
->dp_packet_count
== 0);
5375 ovs_assert(subfacet
->dp_byte_count
== 0);
5379 /* Resets 'subfacet''s datapath statistics counters. This should be called
5380 * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is
5381 * non-null, it should contain the statistics returned by dpif when 'subfacet'
5382 * was reset in the datapath. 'stats' will be modified to include only
5383 * statistics new since 'subfacet' was last updated. */
5385 subfacet_reset_dp_stats(struct subfacet
*subfacet
,
5386 struct dpif_flow_stats
*stats
)
5389 && subfacet
->dp_packet_count
<= stats
->n_packets
5390 && subfacet
->dp_byte_count
<= stats
->n_bytes
) {
5391 stats
->n_packets
-= subfacet
->dp_packet_count
;
5392 stats
->n_bytes
-= subfacet
->dp_byte_count
;
5395 subfacet
->dp_packet_count
= 0;
5396 subfacet
->dp_byte_count
= 0;
5399 /* Folds the statistics from 'stats' into the counters in 'subfacet'.
5401 * Because of the meaning of a subfacet's counters, it only makes sense to do
5402 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
5403 * represents a packet that was sent by hand or if it represents statistics
5404 * that have been cleared out of the datapath. */
5406 subfacet_update_stats(struct subfacet
*subfacet
,
5407 const struct dpif_flow_stats
*stats
)
5409 if (stats
->n_packets
|| stats
->used
> subfacet
->used
) {
5410 struct facet
*facet
= subfacet
->facet
;
5412 subfacet
->used
= MAX(subfacet
->used
, stats
->used
);
5413 facet
->used
= MAX(facet
->used
, stats
->used
);
5414 facet
->packet_count
+= stats
->n_packets
;
5415 facet
->byte_count
+= stats
->n_bytes
;
5416 facet
->tcp_flags
|= stats
->tcp_flags
;
5422 static struct rule_dpif
*
5423 rule_dpif_lookup(struct ofproto_dpif
*ofproto
, const struct flow
*flow
)
5425 struct rule_dpif
*rule
;
5427 rule
= rule_dpif_lookup__(ofproto
, flow
, 0);
5432 return rule_dpif_miss_rule(ofproto
, flow
);
5435 static struct rule_dpif
*
5436 rule_dpif_lookup__(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
5439 struct cls_rule
*cls_rule
;
5440 struct classifier
*cls
;
5443 if (table_id
>= N_TABLES
) {
5447 cls
= &ofproto
->up
.tables
[table_id
].cls
;
5448 frag
= (flow
->nw_frag
& FLOW_NW_FRAG_ANY
) != 0;
5449 if (frag
&& ofproto
->up
.frag_handling
== OFPC_FRAG_NORMAL
) {
5450 /* We must pretend that transport ports are unavailable. */
5451 struct flow ofpc_normal_flow
= *flow
;
5452 ofpc_normal_flow
.tp_src
= htons(0);
5453 ofpc_normal_flow
.tp_dst
= htons(0);
5454 cls_rule
= classifier_lookup(cls
, &ofpc_normal_flow
);
5455 } else if (frag
&& ofproto
->up
.frag_handling
== OFPC_FRAG_DROP
) {
5456 cls_rule
= &ofproto
->drop_frags_rule
->up
.cr
;
5458 cls_rule
= classifier_lookup(cls
, flow
);
5460 return rule_dpif_cast(rule_from_cls_rule(cls_rule
));
5463 static struct rule_dpif
*
5464 rule_dpif_miss_rule(struct ofproto_dpif
*ofproto
, const struct flow
*flow
)
5466 struct ofport_dpif
*port
;
5468 port
= get_ofp_port(ofproto
, flow
->in_port
);
5470 VLOG_WARN_RL(&rl
, "packet-in on unknown port %"PRIu16
, flow
->in_port
);
5471 return ofproto
->miss_rule
;
5474 if (port
->up
.pp
.config
& OFPUTIL_PC_NO_PACKET_IN
) {
5475 return ofproto
->no_packet_in_rule
;
5477 return ofproto
->miss_rule
;
5481 complete_operation(struct rule_dpif
*rule
)
5483 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5485 rule_invalidate(rule
);
5487 struct dpif_completion
*c
= xmalloc(sizeof *c
);
5488 c
->op
= rule
->up
.pending
;
5489 list_push_back(&ofproto
->completions
, &c
->list_node
);
5491 ofoperation_complete(rule
->up
.pending
, 0);
5495 static struct rule
*
5498 struct rule_dpif
*rule
= xmalloc(sizeof *rule
);
5503 rule_dealloc(struct rule
*rule_
)
5505 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5510 rule_construct(struct rule
*rule_
)
5512 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5513 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5514 struct rule_dpif
*victim
;
5517 rule
->packet_count
= 0;
5518 rule
->byte_count
= 0;
5520 victim
= rule_dpif_cast(ofoperation_get_victim(rule
->up
.pending
));
5521 if (victim
&& !list_is_empty(&victim
->facets
)) {
5522 struct facet
*facet
;
5524 rule
->facets
= victim
->facets
;
5525 list_moved(&rule
->facets
);
5526 LIST_FOR_EACH (facet
, list_node
, &rule
->facets
) {
5527 /* XXX: We're only clearing our local counters here. It's possible
5528 * that quite a few packets are unaccounted for in the datapath
5529 * statistics. These will be accounted to the new rule instead of
5530 * cleared as required. This could be fixed by clearing out the
5531 * datapath statistics for this facet, but currently it doesn't
5533 facet_reset_counters(facet
);
5537 /* Must avoid list_moved() in this case. */
5538 list_init(&rule
->facets
);
5541 table_id
= rule
->up
.table_id
;
5543 rule
->tag
= victim
->tag
;
5544 } else if (table_id
== 0) {
5549 miniflow_expand(&rule
->up
.cr
.match
.flow
, &flow
);
5550 rule
->tag
= rule_calculate_tag(&flow
, &rule
->up
.cr
.match
.mask
,
5551 ofproto
->tables
[table_id
].basis
);
5554 complete_operation(rule
);
5559 rule_destruct(struct rule
*rule_
)
5561 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5562 struct facet
*facet
, *next_facet
;
5564 LIST_FOR_EACH_SAFE (facet
, next_facet
, list_node
, &rule
->facets
) {
5565 facet_revalidate(facet
);
5568 complete_operation(rule
);
5572 rule_get_stats(struct rule
*rule_
, uint64_t *packets
, uint64_t *bytes
)
5574 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5576 /* push_all_stats() can handle flow misses which, when using the learn
5577 * action, can cause rules to be added and deleted. This can corrupt our
5578 * caller's datastructures which assume that rule_get_stats() doesn't have
5579 * an impact on the flow table. To be safe, we disable miss handling. */
5580 push_all_stats__(false);
5582 /* Start from historical data for 'rule' itself that are no longer tracked
5583 * in facets. This counts, for example, facets that have expired. */
5584 *packets
= rule
->packet_count
;
5585 *bytes
= rule
->byte_count
;
5589 rule_dpif_execute(struct rule_dpif
*rule
, const struct flow
*flow
,
5590 struct ofpbuf
*packet
)
5592 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
5593 struct initial_vals initial_vals
;
5594 struct dpif_flow_stats stats
;
5595 struct xlate_out xout
;
5596 struct xlate_in xin
;
5598 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
5599 rule_credit_stats(rule
, &stats
);
5601 initial_vals
.vlan_tci
= flow
->vlan_tci
;
5602 xlate_in_init(&xin
, ofproto
, flow
, &initial_vals
, rule
, stats
.tcp_flags
,
5604 xin
.resubmit_stats
= &stats
;
5605 xlate_actions(&xin
, &xout
);
5607 execute_odp_actions(ofproto
, flow
, xout
.odp_actions
.data
,
5608 xout
.odp_actions
.size
, packet
);
5610 xlate_out_uninit(&xout
);
5614 rule_execute(struct rule
*rule
, const struct flow
*flow
,
5615 struct ofpbuf
*packet
)
5617 rule_dpif_execute(rule_dpif_cast(rule
), flow
, packet
);
5618 ofpbuf_delete(packet
);
5623 rule_modify_actions(struct rule
*rule_
)
5625 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
5627 complete_operation(rule
);
5630 /* Sends 'packet' out 'ofport'.
5631 * May modify 'packet'.
5632 * Returns 0 if successful, otherwise a positive errno value. */
5634 send_packet(const struct ofport_dpif
*ofport
, struct ofpbuf
*packet
)
5636 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
5637 uint64_t odp_actions_stub
[1024 / 8];
5638 struct ofpbuf key
, odp_actions
;
5639 struct dpif_flow_stats stats
;
5640 struct odputil_keybuf keybuf
;
5641 struct ofpact_output output
;
5642 struct xlate_out xout
;
5643 struct xlate_in xin
;
5647 ofpbuf_use_stub(&odp_actions
, odp_actions_stub
, sizeof odp_actions_stub
);
5648 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
5650 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
5651 flow_extract(packet
, 0, 0, NULL
, OFPP_NONE
, &flow
);
5652 odp_flow_key_from_flow(&key
, &flow
, ofp_port_to_odp_port(ofproto
,
5654 dpif_flow_stats_extract(&flow
, packet
, time_msec(), &stats
);
5656 ofpact_init(&output
.ofpact
, OFPACT_OUTPUT
, sizeof output
);
5657 output
.port
= ofport
->up
.ofp_port
;
5660 xlate_in_init(&xin
, ofproto
, &flow
, NULL
, NULL
, 0, packet
);
5661 xin
.ofpacts_len
= sizeof output
;
5662 xin
.ofpacts
= &output
.ofpact
;
5663 xin
.resubmit_stats
= &stats
;
5664 xlate_actions(&xin
, &xout
);
5666 error
= dpif_execute(ofproto
->backer
->dpif
,
5668 xout
.odp_actions
.data
, xout
.odp_actions
.size
,
5670 xlate_out_uninit(&xout
);
5673 VLOG_WARN_RL(&rl
, "%s: failed to send packet on port %s (%s)",
5674 ofproto
->up
.name
, netdev_get_name(ofport
->up
.netdev
),
5678 ofproto
->stats
.tx_packets
++;
5679 ofproto
->stats
.tx_bytes
+= packet
->size
;
5683 /* OpenFlow to datapath action translation. */
5685 static bool may_receive(const struct ofport_dpif
*, struct xlate_ctx
*);
5686 static void do_xlate_actions(const struct ofpact
*, size_t ofpacts_len
,
5687 struct xlate_ctx
*);
5688 static void xlate_normal(struct xlate_ctx
*);
5690 /* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
5691 * The action will state 'slow' as the reason that the action is in the slow
5692 * path. (This is purely informational: it allows a human viewing "ovs-dpctl
5693 * dump-flows" output to see why a flow is in the slow path.)
5695 * The 'stub_size' bytes in 'stub' will be used to store the action.
5696 * 'stub_size' must be large enough for the action.
5698 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
5701 compose_slow_path(const struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
5702 enum slow_path_reason slow
,
5703 uint64_t *stub
, size_t stub_size
,
5704 const struct nlattr
**actionsp
, size_t *actions_lenp
)
5706 union user_action_cookie cookie
;
5709 cookie
.type
= USER_ACTION_COOKIE_SLOW_PATH
;
5710 cookie
.slow_path
.unused
= 0;
5711 cookie
.slow_path
.reason
= slow
;
5713 ofpbuf_use_stack(&buf
, stub
, stub_size
);
5714 if (slow
& (SLOW_CFM
| SLOW_BFD
| SLOW_LACP
| SLOW_STP
)) {
5715 uint32_t pid
= dpif_port_get_pid(ofproto
->backer
->dpif
, UINT32_MAX
);
5716 odp_put_userspace_action(pid
, &cookie
, sizeof cookie
.slow_path
, &buf
);
5718 put_userspace_action(ofproto
, &buf
, flow
, &cookie
,
5719 sizeof cookie
.slow_path
);
5721 *actionsp
= buf
.data
;
5722 *actions_lenp
= buf
.size
;
5726 put_userspace_action(const struct ofproto_dpif
*ofproto
,
5727 struct ofpbuf
*odp_actions
,
5728 const struct flow
*flow
,
5729 const union user_action_cookie
*cookie
,
5730 const size_t cookie_size
)
5734 pid
= dpif_port_get_pid(ofproto
->backer
->dpif
,
5735 ofp_port_to_odp_port(ofproto
, flow
->in_port
));
5737 return odp_put_userspace_action(pid
, cookie
, cookie_size
, odp_actions
);
5740 /* Compose SAMPLE action for sFlow or IPFIX. The given probability is
5741 * the number of packets out of UINT32_MAX to sample. The given
5742 * cookie is passed back in the callback for each sampled packet.
5745 compose_sample_action(const struct ofproto_dpif
*ofproto
,
5746 struct ofpbuf
*odp_actions
,
5747 const struct flow
*flow
,
5748 const uint32_t probability
,
5749 const union user_action_cookie
*cookie
,
5750 const size_t cookie_size
)
5752 size_t sample_offset
, actions_offset
;
5755 sample_offset
= nl_msg_start_nested(odp_actions
, OVS_ACTION_ATTR_SAMPLE
);
5757 nl_msg_put_u32(odp_actions
, OVS_SAMPLE_ATTR_PROBABILITY
, probability
);
5759 actions_offset
= nl_msg_start_nested(odp_actions
, OVS_SAMPLE_ATTR_ACTIONS
);
5760 cookie_offset
= put_userspace_action(ofproto
, odp_actions
, flow
, cookie
,
5763 nl_msg_end_nested(odp_actions
, actions_offset
);
5764 nl_msg_end_nested(odp_actions
, sample_offset
);
5765 return cookie_offset
;
5769 compose_sflow_cookie(const struct ofproto_dpif
*ofproto
,
5770 ovs_be16 vlan_tci
, uint32_t odp_port
,
5771 unsigned int n_outputs
, union user_action_cookie
*cookie
)
5775 cookie
->type
= USER_ACTION_COOKIE_SFLOW
;
5776 cookie
->sflow
.vlan_tci
= vlan_tci
;
5778 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
5779 * port information") for the interpretation of cookie->output. */
5780 switch (n_outputs
) {
5782 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
5783 cookie
->sflow
.output
= 0x40000000 | 256;
5787 ifindex
= dpif_sflow_odp_port_to_ifindex(ofproto
->sflow
, odp_port
);
5789 cookie
->sflow
.output
= ifindex
;
5794 /* 0x80000000 means "multiple output ports. */
5795 cookie
->sflow
.output
= 0x80000000 | n_outputs
;
5800 /* Compose SAMPLE action for sFlow bridge sampling. */
5802 compose_sflow_action(const struct ofproto_dpif
*ofproto
,
5803 struct ofpbuf
*odp_actions
,
5804 const struct flow
*flow
,
5807 uint32_t probability
;
5808 union user_action_cookie cookie
;
5810 if (!ofproto
->sflow
|| flow
->in_port
== OFPP_NONE
) {
5814 probability
= dpif_sflow_get_probability(ofproto
->sflow
);
5815 compose_sflow_cookie(ofproto
, htons(0), odp_port
,
5816 odp_port
== OVSP_NONE
? 0 : 1, &cookie
);
5818 return compose_sample_action(ofproto
, odp_actions
, flow
, probability
,
5819 &cookie
, sizeof cookie
.sflow
);
5823 compose_flow_sample_cookie(uint16_t probability
, uint32_t collector_set_id
,
5824 uint32_t obs_domain_id
, uint32_t obs_point_id
,
5825 union user_action_cookie
*cookie
)
5827 cookie
->type
= USER_ACTION_COOKIE_FLOW_SAMPLE
;
5828 cookie
->flow_sample
.probability
= probability
;
5829 cookie
->flow_sample
.collector_set_id
= collector_set_id
;
5830 cookie
->flow_sample
.obs_domain_id
= obs_domain_id
;
5831 cookie
->flow_sample
.obs_point_id
= obs_point_id
;
5835 compose_ipfix_cookie(union user_action_cookie
*cookie
)
5837 cookie
->type
= USER_ACTION_COOKIE_IPFIX
;
5840 /* Compose SAMPLE action for IPFIX bridge sampling. */
5842 compose_ipfix_action(const struct ofproto_dpif
*ofproto
,
5843 struct ofpbuf
*odp_actions
,
5844 const struct flow
*flow
)
5846 uint32_t probability
;
5847 union user_action_cookie cookie
;
5849 if (!ofproto
->ipfix
|| flow
->in_port
== OFPP_NONE
) {
5853 probability
= dpif_ipfix_get_bridge_exporter_probability(ofproto
->ipfix
);
5854 compose_ipfix_cookie(&cookie
);
5856 compose_sample_action(ofproto
, odp_actions
, flow
, probability
,
5857 &cookie
, sizeof cookie
.ipfix
);
5860 /* SAMPLE action for sFlow must be first action in any given list of
5861 * actions. At this point we do not have all information required to
5862 * build it. So try to build sample action as complete as possible. */
5864 add_sflow_action(struct xlate_ctx
*ctx
)
5866 ctx
->user_cookie_offset
= compose_sflow_action(ctx
->ofproto
,
5867 &ctx
->xout
->odp_actions
,
5868 &ctx
->xin
->flow
, OVSP_NONE
);
5869 ctx
->sflow_odp_port
= 0;
5870 ctx
->sflow_n_outputs
= 0;
5873 /* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
5874 * of actions, eventually after the SAMPLE action for sFlow. */
5876 add_ipfix_action(struct xlate_ctx
*ctx
)
5878 compose_ipfix_action(ctx
->ofproto
, &ctx
->xout
->odp_actions
,
5882 /* Fix SAMPLE action according to data collected while composing ODP actions.
5883 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
5884 * USERSPACE action's user-cookie which is required for sflow. */
5886 fix_sflow_action(struct xlate_ctx
*ctx
)
5888 const struct flow
*base
= &ctx
->base_flow
;
5889 union user_action_cookie
*cookie
;
5891 if (!ctx
->user_cookie_offset
) {
5895 cookie
= ofpbuf_at(&ctx
->xout
->odp_actions
, ctx
->user_cookie_offset
,
5896 sizeof cookie
->sflow
);
5897 ovs_assert(cookie
->type
== USER_ACTION_COOKIE_SFLOW
);
5899 compose_sflow_cookie(ctx
->ofproto
, base
->vlan_tci
,
5900 ctx
->sflow_odp_port
, ctx
->sflow_n_outputs
, cookie
);
5904 compose_output_action__(struct xlate_ctx
*ctx
, uint16_t ofp_port
,
5907 const struct ofport_dpif
*ofport
= get_ofp_port(ctx
->ofproto
, ofp_port
);
5908 ovs_be16 flow_vlan_tci
;
5909 uint32_t flow_skb_mark
;
5910 uint8_t flow_nw_tos
;
5911 struct priority_to_dscp
*pdscp
;
5912 uint32_t out_port
, odp_port
;
5914 /* If 'struct flow' gets additional metadata, we'll need to zero it out
5915 * before traversing a patch port. */
5916 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 20);
5919 xlate_report(ctx
, "Nonexistent output port");
5921 } else if (ofport
->up
.pp
.config
& OFPUTIL_PC_NO_FWD
) {
5922 xlate_report(ctx
, "OFPPC_NO_FWD set, skipping output");
5924 } else if (check_stp
&& !stp_forward_in_state(ofport
->stp_state
)) {
5925 xlate_report(ctx
, "STP not in forwarding state, skipping output");
5929 if (netdev_vport_is_patch(ofport
->up
.netdev
)) {
5930 struct ofport_dpif
*peer
= ofport_get_peer(ofport
);
5931 struct flow old_flow
= ctx
->xin
->flow
;
5932 const struct ofproto_dpif
*peer_ofproto
;
5933 enum slow_path_reason special
;
5934 struct ofport_dpif
*in_port
;
5937 xlate_report(ctx
, "Nonexistent patch port peer");
5941 peer_ofproto
= ofproto_dpif_cast(peer
->up
.ofproto
);
5942 if (peer_ofproto
->backer
!= ctx
->ofproto
->backer
) {
5943 xlate_report(ctx
, "Patch port peer on a different datapath");
5947 ctx
->ofproto
= ofproto_dpif_cast(peer
->up
.ofproto
);
5948 ctx
->xin
->flow
.in_port
= peer
->up
.ofp_port
;
5949 ctx
->xin
->flow
.metadata
= htonll(0);
5950 memset(&ctx
->xin
->flow
.tunnel
, 0, sizeof ctx
->xin
->flow
.tunnel
);
5951 memset(ctx
->xin
->flow
.regs
, 0, sizeof ctx
->xin
->flow
.regs
);
5953 in_port
= get_ofp_port(ctx
->ofproto
, ctx
->xin
->flow
.in_port
);
5954 special
= process_special(ctx
->ofproto
, &ctx
->xin
->flow
, in_port
,
5957 ctx
->xout
->slow
= special
;
5958 } else if (!in_port
|| may_receive(in_port
, ctx
)) {
5959 if (!in_port
|| stp_forward_in_state(in_port
->stp_state
)) {
5960 xlate_table_action(ctx
, ctx
->xin
->flow
.in_port
, 0, true);
5962 /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
5963 * learning action look at the packet, then drop it. */
5964 struct flow old_base_flow
= ctx
->base_flow
;
5965 size_t old_size
= ctx
->xout
->odp_actions
.size
;
5966 xlate_table_action(ctx
, ctx
->xin
->flow
.in_port
, 0, true);
5967 ctx
->base_flow
= old_base_flow
;
5968 ctx
->xout
->odp_actions
.size
= old_size
;
5972 ctx
->xin
->flow
= old_flow
;
5973 ctx
->ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
5975 if (ctx
->xin
->resubmit_stats
) {
5976 netdev_vport_inc_tx(ofport
->up
.netdev
, ctx
->xin
->resubmit_stats
);
5977 netdev_vport_inc_rx(peer
->up
.netdev
, ctx
->xin
->resubmit_stats
);
5983 flow_vlan_tci
= ctx
->xin
->flow
.vlan_tci
;
5984 flow_skb_mark
= ctx
->xin
->flow
.skb_mark
;
5985 flow_nw_tos
= ctx
->xin
->flow
.nw_tos
;
5987 pdscp
= get_priority(ofport
, ctx
->xin
->flow
.skb_priority
);
5989 ctx
->xin
->flow
.nw_tos
&= ~IP_DSCP_MASK
;
5990 ctx
->xin
->flow
.nw_tos
|= pdscp
->dscp
;
5993 if (ofport
->tnl_port
) {
5994 /* Save tunnel metadata so that changes made due to
5995 * the Logical (tunnel) Port are not visible for any further
5996 * matches, while explicit set actions on tunnel metadata are.
5998 struct flow_tnl flow_tnl
= ctx
->xin
->flow
.tunnel
;
5999 odp_port
= tnl_port_send(ofport
->tnl_port
, &ctx
->xin
->flow
);
6000 if (odp_port
== OVSP_NONE
) {
6001 xlate_report(ctx
, "Tunneling decided against output");
6002 goto out
; /* restore flow_nw_tos */
6004 if (ctx
->xin
->flow
.tunnel
.ip_dst
== ctx
->orig_tunnel_ip_dst
) {
6005 xlate_report(ctx
, "Not tunneling to our own address");
6006 goto out
; /* restore flow_nw_tos */
6008 if (ctx
->xin
->resubmit_stats
) {
6009 netdev_vport_inc_tx(ofport
->up
.netdev
, ctx
->xin
->resubmit_stats
);
6011 out_port
= odp_port
;
6012 commit_odp_tunnel_action(&ctx
->xin
->flow
, &ctx
->base_flow
,
6013 &ctx
->xout
->odp_actions
);
6014 ctx
->xin
->flow
.tunnel
= flow_tnl
; /* Restore tunnel metadata */
6016 uint16_t vlandev_port
;
6017 odp_port
= ofport
->odp_port
;
6018 vlandev_port
= vsp_realdev_to_vlandev(ctx
->ofproto
, ofp_port
,
6019 ctx
->xin
->flow
.vlan_tci
);
6020 if (vlandev_port
== ofp_port
) {
6021 out_port
= odp_port
;
6023 out_port
= ofp_port_to_odp_port(ctx
->ofproto
, vlandev_port
);
6024 ctx
->xin
->flow
.vlan_tci
= htons(0);
6026 ctx
->xin
->flow
.skb_mark
&= ~IPSEC_MARK
;
6028 commit_odp_actions(&ctx
->xin
->flow
, &ctx
->base_flow
,
6029 &ctx
->xout
->odp_actions
);
6030 nl_msg_put_u32(&ctx
->xout
->odp_actions
, OVS_ACTION_ATTR_OUTPUT
, out_port
);
6032 ctx
->sflow_odp_port
= odp_port
;
6033 ctx
->sflow_n_outputs
++;
6034 ctx
->xout
->nf_output_iface
= ofp_port
;
6037 ctx
->xin
->flow
.vlan_tci
= flow_vlan_tci
;
6038 ctx
->xin
->flow
.skb_mark
= flow_skb_mark
;
6040 ctx
->xin
->flow
.nw_tos
= flow_nw_tos
;
static void
compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}

static void
tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    uint8_t table_id = ctx->table_id;

    if (table_id > 0 && table_id < N_TABLES) {
        struct table_dpif *table = &ofproto->tables[table_id];
        if (table->other_table) {
            ctx->xout->tags |= (rule && rule->tag
                                ? rule->tag
                                : rule_calculate_tag(&ctx->xin->flow,
                                                     &table->other_table->mask,
                                                     table->basis));
        }
    }
}
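/* Illustrative note (an assumption added for clarity, not taken from the
 * original text): if a table contains only rules of one form plus a single
 * catch-all rule, a facet that resubmits into it picks up a tag derived from
 * its flow and that form's mask.  Later, when a rule of the same form is added
 * or removed, only the facets carrying that tag need revalidation instead of
 * every facet in the bridge. */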
/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
{
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx, rule);
    }
    if (rule == NULL && may_packet_in) {
        /* XXX
         * check if table configuration flags
         * OFPTC_TABLE_MISS_CONTROLLER, default.
         * OFPTC_TABLE_MISS_CONTINUE,
         * OFPTC_TABLE_MISS_DROP
         * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? */
        rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
    }
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
    }
    return rule;
}
static void
xlate_table_action(struct xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        uint16_t old_in_port = ctx->xin->flow.in_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port = in_port;
        rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, table_id);

        tag_the_flow(ctx, rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}
static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->xin->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
6177 execute_controller_action(struct xlate_ctx
*ctx
, int len
,
6178 enum ofp_packet_in_reason reason
,
6179 uint16_t controller_id
)
6181 struct ofputil_packet_in pin
;
6182 struct ofpbuf
*packet
;
6184 ovs_assert(!ctx
->xout
->slow
|| ctx
->xout
->slow
== SLOW_CONTROLLER
);
6185 ctx
->xout
->slow
= SLOW_CONTROLLER
;
6186 if (!ctx
->xin
->packet
) {
6190 packet
= ofpbuf_clone(ctx
->xin
->packet
);
6192 if (packet
->l2
&& packet
->l3
) {
6193 struct eth_header
*eh
;
6194 uint16_t mpls_depth
;
6196 eth_pop_vlan(packet
);
6199 memcpy(eh
->eth_src
, ctx
->xin
->flow
.dl_src
, sizeof eh
->eth_src
);
6200 memcpy(eh
->eth_dst
, ctx
->xin
->flow
.dl_dst
, sizeof eh
->eth_dst
);
6202 if (ctx
->xin
->flow
.vlan_tci
& htons(VLAN_CFI
)) {
6203 eth_push_vlan(packet
, ctx
->xin
->flow
.vlan_tci
);
6206 mpls_depth
= eth_mpls_depth(packet
);
6208 if (mpls_depth
< ctx
->xin
->flow
.mpls_depth
) {
6209 push_mpls(packet
, ctx
->xin
->flow
.dl_type
, ctx
->xin
->flow
.mpls_lse
);
6210 } else if (mpls_depth
> ctx
->xin
->flow
.mpls_depth
) {
6211 pop_mpls(packet
, ctx
->xin
->flow
.dl_type
);
6212 } else if (mpls_depth
) {
6213 set_mpls_lse(packet
, ctx
->xin
->flow
.mpls_lse
);
6217 if (ctx
->xin
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
6218 packet_set_ipv4(packet
, ctx
->xin
->flow
.nw_src
,
6219 ctx
->xin
->flow
.nw_dst
, ctx
->xin
->flow
.nw_tos
,
6220 ctx
->xin
->flow
.nw_ttl
);
6224 if (ctx
->xin
->flow
.nw_proto
== IPPROTO_TCP
) {
6225 packet_set_tcp_port(packet
, ctx
->xin
->flow
.tp_src
,
6226 ctx
->xin
->flow
.tp_dst
);
6227 } else if (ctx
->xin
->flow
.nw_proto
== IPPROTO_UDP
) {
6228 packet_set_udp_port(packet
, ctx
->xin
->flow
.tp_src
,
6229 ctx
->xin
->flow
.tp_dst
);
6235 pin
.packet
= packet
->data
;
6236 pin
.packet_len
= packet
->size
;
6237 pin
.reason
= reason
;
6238 pin
.controller_id
= controller_id
;
6239 pin
.table_id
= ctx
->table_id
;
6240 pin
.cookie
= ctx
->rule
? ctx
->rule
->up
.flow_cookie
: 0;
6243 flow_get_metadata(&ctx
->xin
->flow
, &pin
.fmd
);
6245 connmgr_send_packet_in(ctx
->ofproto
->up
.connmgr
, &pin
);
6246 ofpbuf_delete(packet
);
static void
execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(eth_type));

    if (ctx->base_flow.mpls_depth) {
        ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
        ctx->xin->flow.mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2;
        ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40;
        ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        ctx->xin->flow.mpls_depth = 1;
    }
    ctx->xin->flow.dl_type = eth_type;
}
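/* Illustrative summary (not part of the original source): when the first label
 * is pushed onto an IPv4 packet, the label chosen is IPv4 Explicit Null (0),
 * the TC bits are derived from the packet's DSCP, the TTL is copied from
 * nw_ttl (defaulting to 0x40 when nw_ttl is zero), and the bottom-of-stack bit
 * is set because the new label is the only one on the stack. */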
static void
execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    if (ctx->xin->flow.mpls_depth) {
        ctx->xin->flow.mpls_depth--;
        ctx->xin->flow.mpls_lse = htonl(0);
        if (!ctx->xin->flow.mpls_depth) {
            ctx->xin->flow.dl_type = eth_type;
        }
    }
}
6290 compose_dec_ttl(struct xlate_ctx
*ctx
, struct ofpact_cnt_ids
*ids
)
6292 if (ctx
->xin
->flow
.dl_type
!= htons(ETH_TYPE_IP
) &&
6293 ctx
->xin
->flow
.dl_type
!= htons(ETH_TYPE_IPV6
)) {
6297 if (ctx
->xin
->flow
.nw_ttl
> 1) {
6298 ctx
->xin
->flow
.nw_ttl
--;
6303 for (i
= 0; i
< ids
->n_controllers
; i
++) {
6304 execute_controller_action(ctx
, UINT16_MAX
, OFPR_INVALID_TTL
,
6308 /* Stop processing for current table. */
6314 execute_set_mpls_ttl_action(struct xlate_ctx
*ctx
, uint8_t ttl
)
6316 if (!eth_type_mpls(ctx
->xin
->flow
.dl_type
)) {
6320 set_mpls_lse_ttl(&ctx
->xin
->flow
.mpls_lse
, ttl
);
6325 execute_dec_mpls_ttl_action(struct xlate_ctx
*ctx
)
6327 uint8_t ttl
= mpls_lse_to_ttl(ctx
->xin
->flow
.mpls_lse
);
6329 if (!eth_type_mpls(ctx
->xin
->flow
.dl_type
)) {
6335 set_mpls_lse_ttl(&ctx
->xin
->flow
.mpls_lse
, ttl
);
6338 execute_controller_action(ctx
, UINT16_MAX
, OFPR_INVALID_TTL
, 0);
6340 /* Stop processing for current table. */
6346 xlate_output_action(struct xlate_ctx
*ctx
,
6347 uint16_t port
, uint16_t max_len
, bool may_packet_in
)
6349 uint16_t prev_nf_output_iface
= ctx
->xout
->nf_output_iface
;
6351 ctx
->xout
->nf_output_iface
= NF_OUT_DROP
;
6355 compose_output_action(ctx
, ctx
->xin
->flow
.in_port
);
6358 xlate_table_action(ctx
, ctx
->xin
->flow
.in_port
, 0, may_packet_in
);
6364 flood_packets(ctx
, false);
6367 flood_packets(ctx
, true);
6369 case OFPP_CONTROLLER
:
6370 execute_controller_action(ctx
, max_len
, OFPR_ACTION
, 0);
6376 if (port
!= ctx
->xin
->flow
.in_port
) {
6377 compose_output_action(ctx
, port
);
6379 xlate_report(ctx
, "skipping output to input port");
6384 if (prev_nf_output_iface
== NF_OUT_FLOOD
) {
6385 ctx
->xout
->nf_output_iface
= NF_OUT_FLOOD
;
6386 } else if (ctx
->xout
->nf_output_iface
== NF_OUT_DROP
) {
6387 ctx
->xout
->nf_output_iface
= prev_nf_output_iface
;
6388 } else if (prev_nf_output_iface
!= NF_OUT_DROP
&&
6389 ctx
->xout
->nf_output_iface
!= NF_OUT_FLOOD
) {
6390 ctx
->xout
->nf_output_iface
= NF_OUT_MULTI
;
static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        xlate_output_action(ctx, port, or->max_len, false);
    }
}
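/* Illustrative note (an assumption for clarity, not from the original text):
 * this handles actions such as output:NXM_NX_REG0[0..15], where the output
 * port number is read from a register subfield at translation time; values
 * above UINT16_MAX cannot name an OpenFlow 1.0 port, so they are silently
 * ignored rather than translated into an output. */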
6405 xlate_enqueue_action(struct xlate_ctx
*ctx
,
6406 const struct ofpact_enqueue
*enqueue
)
6408 uint16_t ofp_port
= enqueue
->port
;
6409 uint32_t queue_id
= enqueue
->queue
;
6410 uint32_t flow_priority
, priority
;
6413 /* Translate queue to priority. */
6414 error
= dpif_queue_to_priority(ctx
->ofproto
->backer
->dpif
,
6415 queue_id
, &priority
);
6417 /* Fall back to ordinary output action. */
6418 xlate_output_action(ctx
, enqueue
->port
, 0, false);
6422 /* Check output port. */
6423 if (ofp_port
== OFPP_IN_PORT
) {
6424 ofp_port
= ctx
->xin
->flow
.in_port
;
6425 } else if (ofp_port
== ctx
->xin
->flow
.in_port
) {
6429 /* Add datapath actions. */
6430 flow_priority
= ctx
->xin
->flow
.skb_priority
;
6431 ctx
->xin
->flow
.skb_priority
= priority
;
6432 compose_output_action(ctx
, ofp_port
);
6433 ctx
->xin
->flow
.skb_priority
= flow_priority
;
6435 /* Update NetFlow output port. */
6436 if (ctx
->xout
->nf_output_iface
== NF_OUT_DROP
) {
6437 ctx
->xout
->nf_output_iface
= ofp_port
;
6438 } else if (ctx
->xout
->nf_output_iface
!= NF_OUT_FLOOD
) {
6439 ctx
->xout
->nf_output_iface
= NF_OUT_MULTI
;
6444 xlate_set_queue_action(struct xlate_ctx
*ctx
, uint32_t queue_id
)
6446 uint32_t skb_priority
;
6448 if (!dpif_queue_to_priority(ctx
->ofproto
->backer
->dpif
,
6449 queue_id
, &skb_priority
)) {
6450 ctx
->xin
->flow
.skb_priority
= skb_priority
;
6452 /* Couldn't translate queue to a priority. Nothing to do. A warning
6453 * has already been logged. */
6458 slave_enabled_cb(uint16_t ofp_port
, void *ofproto_
)
6460 struct ofproto_dpif
*ofproto
= ofproto_
;
6461 struct ofport_dpif
*port
;
6471 case OFPP_CONTROLLER
: /* Not supported by the bundle action. */
6474 port
= get_ofp_port(ofproto
, ofp_port
);
6475 return port
? port
->may_enable
: false;
6480 xlate_bundle_action(struct xlate_ctx
*ctx
,
6481 const struct ofpact_bundle
*bundle
)
6485 port
= bundle_execute(bundle
, &ctx
->xin
->flow
, slave_enabled_cb
,
6487 if (bundle
->dst
.field
) {
6488 nxm_reg_load(&bundle
->dst
, port
, &ctx
->xin
->flow
);
6490 xlate_output_action(ctx
, port
, 0, false);
6495 xlate_learn_action(struct xlate_ctx
*ctx
,
6496 const struct ofpact_learn
*learn
)
6498 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
6499 struct ofputil_flow_mod fm
;
6500 uint64_t ofpacts_stub
[1024 / 8];
6501 struct ofpbuf ofpacts
;
6504 ofpbuf_use_stack(&ofpacts
, ofpacts_stub
, sizeof ofpacts_stub
);
6505 learn_execute(learn
, &ctx
->xin
->flow
, &fm
, &ofpacts
);
6507 error
= ofproto_flow_mod(&ctx
->ofproto
->up
, &fm
);
6508 if (error
&& !VLOG_DROP_WARN(&rl
)) {
6509 VLOG_WARN("learning action failed to modify flow table (%s)",
6510 ofperr_get_name(error
));
6513 ofpbuf_uninit(&ofpacts
);
/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
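/* Worked example (illustrative, not from the original source): with max = 10,
 * a *timeout of 0 ("infinite") or of 30 both become 10, while a *timeout of 5
 * is left unchanged; with max = 0 the timeout is never reduced. */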
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}
static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;
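    /* Worked example (illustrative, not from the original source): an OpenFlow
     * sample probability of 0x8000 becomes the 32-bit datapath probability
     * 0x80008000.  Since 0x8000 / 0xffff == 0x80008000 / 0xffffffff, the
     * sampled fraction of packets is preserved by the conversion. */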
    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow,
                          probability, &cookie, sizeof cookie.flow_sample);
}
static bool
may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
                                              eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}

static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " but is not ECN capable");
            return false;
        } else {
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
        }
    }

    return true;
}
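/* Illustrative summary (not part of the original source): this is the usual
 * ECN rule for tunnel decapsulation.  If the outer header carries Congestion
 * Experienced (CE) but the inner packet is Not-ECT, the congestion signal
 * cannot be propagated and the packet is dropped; if the inner packet is ECN
 * capable, CE is copied onto it and translation continues. */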
6597 do_xlate_actions(const struct ofpact
*ofpacts
, size_t ofpacts_len
,
6598 struct xlate_ctx
*ctx
)
6600 bool was_evictable
= true;
6601 const struct ofpact
*a
;
6604 /* Don't let the rule we're working on get evicted underneath us. */
6605 was_evictable
= ctx
->rule
->up
.evictable
;
6606 ctx
->rule
->up
.evictable
= false;
6609 do_xlate_actions_again
:
6610 OFPACT_FOR_EACH (a
, ofpacts
, ofpacts_len
) {
6611 struct ofpact_controller
*controller
;
6612 const struct ofpact_metadata
*metadata
;
6620 xlate_output_action(ctx
, ofpact_get_OUTPUT(a
)->port
,
6621 ofpact_get_OUTPUT(a
)->max_len
, true);
6624 case OFPACT_CONTROLLER
:
6625 controller
= ofpact_get_CONTROLLER(a
);
6626 execute_controller_action(ctx
, controller
->max_len
,
6628 controller
->controller_id
);
6631 case OFPACT_ENQUEUE
:
6632 xlate_enqueue_action(ctx
, ofpact_get_ENQUEUE(a
));
6635 case OFPACT_SET_VLAN_VID
:
6636 ctx
->xin
->flow
.vlan_tci
&= ~htons(VLAN_VID_MASK
);
6637 ctx
->xin
->flow
.vlan_tci
|=
6638 (htons(ofpact_get_SET_VLAN_VID(a
)->vlan_vid
)
6642 case OFPACT_SET_VLAN_PCP
:
6643 ctx
->xin
->flow
.vlan_tci
&= ~htons(VLAN_PCP_MASK
);
6644 ctx
->xin
->flow
.vlan_tci
|=
6645 htons((ofpact_get_SET_VLAN_PCP(a
)->vlan_pcp
<< VLAN_PCP_SHIFT
)
6649 case OFPACT_STRIP_VLAN
:
6650 ctx
->xin
->flow
.vlan_tci
= htons(0);
6653 case OFPACT_PUSH_VLAN
:
6654 /* XXX 802.1AD(QinQ) */
6655 ctx
->xin
->flow
.vlan_tci
= htons(VLAN_CFI
);
6658 case OFPACT_SET_ETH_SRC
:
6659 memcpy(ctx
->xin
->flow
.dl_src
, ofpact_get_SET_ETH_SRC(a
)->mac
,
6663 case OFPACT_SET_ETH_DST
:
6664 memcpy(ctx
->xin
->flow
.dl_dst
, ofpact_get_SET_ETH_DST(a
)->mac
,
6668 case OFPACT_SET_IPV4_SRC
:
6669 if (ctx
->xin
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
6670 ctx
->xin
->flow
.nw_src
= ofpact_get_SET_IPV4_SRC(a
)->ipv4
;
6674 case OFPACT_SET_IPV4_DST
:
6675 if (ctx
->xin
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
6676 ctx
->xin
->flow
.nw_dst
= ofpact_get_SET_IPV4_DST(a
)->ipv4
;
6680 case OFPACT_SET_IPV4_DSCP
:
6681 /* OpenFlow 1.0 only supports IPv4. */
6682 if (ctx
->xin
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
6683 ctx
->xin
->flow
.nw_tos
&= ~IP_DSCP_MASK
;
6684 ctx
->xin
->flow
.nw_tos
|= ofpact_get_SET_IPV4_DSCP(a
)->dscp
;
6688 case OFPACT_SET_L4_SRC_PORT
:
6689 if (is_ip_any(&ctx
->xin
->flow
)) {
6690 ctx
->xin
->flow
.tp_src
=
6691 htons(ofpact_get_SET_L4_SRC_PORT(a
)->port
);
6695 case OFPACT_SET_L4_DST_PORT
:
6696 if (is_ip_any(&ctx
->xin
->flow
)) {
6697 ctx
->xin
->flow
.tp_dst
=
6698 htons(ofpact_get_SET_L4_DST_PORT(a
)->port
);
6702 case OFPACT_RESUBMIT
:
6703 xlate_ofpact_resubmit(ctx
, ofpact_get_RESUBMIT(a
));
6706 case OFPACT_SET_TUNNEL
:
6707 ctx
->xin
->flow
.tunnel
.tun_id
=
6708 htonll(ofpact_get_SET_TUNNEL(a
)->tun_id
);
6711 case OFPACT_SET_QUEUE
:
6712 xlate_set_queue_action(ctx
, ofpact_get_SET_QUEUE(a
)->queue_id
);
6715 case OFPACT_POP_QUEUE
:
6716 ctx
->xin
->flow
.skb_priority
= ctx
->orig_skb_priority
;
6719 case OFPACT_REG_MOVE
:
6720 nxm_execute_reg_move(ofpact_get_REG_MOVE(a
), &ctx
->xin
->flow
);
6723 case OFPACT_REG_LOAD
:
6724 nxm_execute_reg_load(ofpact_get_REG_LOAD(a
), &ctx
->xin
->flow
);
6727 case OFPACT_STACK_PUSH
:
6728 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a
), &ctx
->xin
->flow
,
6732 case OFPACT_STACK_POP
:
6733 nxm_execute_stack_pop(ofpact_get_STACK_POP(a
), &ctx
->xin
->flow
,
6737 case OFPACT_PUSH_MPLS
:
6738 execute_mpls_push_action(ctx
, ofpact_get_PUSH_MPLS(a
)->ethertype
);
6741 case OFPACT_POP_MPLS
:
6742 execute_mpls_pop_action(ctx
, ofpact_get_POP_MPLS(a
)->ethertype
);
6745 case OFPACT_SET_MPLS_TTL
:
6746 if (execute_set_mpls_ttl_action(ctx
,
6747 ofpact_get_SET_MPLS_TTL(a
)->ttl
)) {
6752 case OFPACT_DEC_MPLS_TTL
:
6753 if (execute_dec_mpls_ttl_action(ctx
)) {
6758 case OFPACT_DEC_TTL
:
6759 if (compose_dec_ttl(ctx
, ofpact_get_DEC_TTL(a
))) {
6765 /* Nothing to do. */
6768 case OFPACT_MULTIPATH
:
6769 multipath_execute(ofpact_get_MULTIPATH(a
), &ctx
->xin
->flow
);
6773 ctx
->ofproto
->has_bundle_action
= true;
6774 xlate_bundle_action(ctx
, ofpact_get_BUNDLE(a
));
6777 case OFPACT_OUTPUT_REG
:
6778 xlate_output_reg_action(ctx
, ofpact_get_OUTPUT_REG(a
));
6782 ctx
->xout
->has_learn
= true;
6783 if (ctx
->xin
->may_learn
) {
6784 xlate_learn_action(ctx
, ofpact_get_LEARN(a
));
6792 case OFPACT_FIN_TIMEOUT
:
6793 ctx
->xout
->has_fin_timeout
= true;
6794 xlate_fin_timeout(ctx
, ofpact_get_FIN_TIMEOUT(a
));
        case OFPACT_CLEAR_ACTIONS:
            /* Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time. */
            break;
6805 case OFPACT_WRITE_METADATA
:
6806 metadata
= ofpact_get_WRITE_METADATA(a
);
6807 ctx
->xin
->flow
.metadata
&= ~metadata
->mask
;
6808 ctx
->xin
->flow
.metadata
|= metadata
->metadata
& metadata
->mask
;
6811 case OFPACT_GOTO_TABLE
: {
6812 /* It is assumed that goto-table is the last action. */
6813 struct ofpact_goto_table
*ogt
= ofpact_get_GOTO_TABLE(a
);
6814 struct rule_dpif
*rule
;
6816 ovs_assert(ctx
->table_id
< ogt
->table_id
);
6818 ctx
->table_id
= ogt
->table_id
;
6820 /* Look up a flow from the new table. */
6821 rule
= rule_dpif_lookup__(ctx
->ofproto
, &ctx
->xin
->flow
, ctx
->table_id
);
6823 tag_the_flow(ctx
, rule
);
6825 rule
= ctx_rule_hooks(ctx
, rule
, true);
6829 ctx
->rule
->up
.evictable
= was_evictable
;
6832 was_evictable
= rule
->up
.evictable
;
6833 rule
->up
.evictable
= false;
6835 /* Tail recursion removal. */
6836 ofpacts
= rule
->up
.ofpacts
;
6837 ofpacts_len
= rule
->up
.ofpacts_len
;
6838 goto do_xlate_actions_again
;
6844 xlate_sample_action(ctx
, ofpact_get_SAMPLE(a
));
6851 ctx
->rule
->up
.evictable
= was_evictable
;
6856 xlate_in_init(struct xlate_in
*xin
, struct ofproto_dpif
*ofproto
,
6857 const struct flow
*flow
,
6858 const struct initial_vals
*initial_vals
,
6859 struct rule_dpif
*rule
, uint8_t tcp_flags
,
6860 const struct ofpbuf
*packet
)
6862 xin
->ofproto
= ofproto
;
6864 xin
->packet
= packet
;
6865 xin
->may_learn
= packet
!= NULL
;
6867 xin
->ofpacts
= NULL
;
6868 xin
->ofpacts_len
= 0;
6869 xin
->tcp_flags
= tcp_flags
;
6870 xin
->resubmit_hook
= NULL
;
6871 xin
->report_hook
= NULL
;
6872 xin
->resubmit_stats
= NULL
;
6875 xin
->initial_vals
= *initial_vals
;
6877 xin
->initial_vals
.vlan_tci
= xin
->flow
.vlan_tci
;
6882 xlate_out_uninit(struct xlate_out
*xout
)
6885 ofpbuf_uninit(&xout
->odp_actions
);
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions in 'odp_actions', using 'ctx'. */
6892 xlate_actions(struct xlate_in
*xin
, struct xlate_out
*xout
)
6894 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
6895 * that in the future we always keep a copy of the original flow for
6896 * tracing purposes. */
6897 static bool hit_resubmit_limit
;
6899 enum slow_path_reason special
;
6900 const struct ofpact
*ofpacts
;
6901 struct ofport_dpif
*in_port
;
6902 struct flow orig_flow
;
6903 struct xlate_ctx ctx
;
6906 COVERAGE_INC(ofproto_dpif_xlate
);
    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */
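    /* Illustrative example (assumed values, not from the original text): if a
     * packet arrives on VLAN 10 and a mod_vlan_vid:20 action is translated,
     * 'base_flow' keeps the VLAN 10 vlan_tci that the kernel saw while 'flow'
     * ends up with VLAN 20, and the commit step emits whatever datapath
     * set/push/pop actions are needed to turn one into the other before the
     * packet is output. */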
6932 ctx
.ofproto
= xin
->ofproto
;
6933 ctx
.rule
= xin
->rule
;
6935 ctx
.base_flow
= ctx
.xin
->flow
;
6936 ctx
.base_flow
.vlan_tci
= xin
->initial_vals
.vlan_tci
;
6937 memset(&ctx
.base_flow
.tunnel
, 0, sizeof ctx
.base_flow
.tunnel
);
6938 ctx
.orig_tunnel_ip_dst
= ctx
.xin
->flow
.tunnel
.ip_dst
;
6942 ctx
.xout
->has_learn
= false;
6943 ctx
.xout
->has_normal
= false;
6944 ctx
.xout
->has_fin_timeout
= false;
6945 ctx
.xout
->nf_output_iface
= NF_OUT_DROP
;
6946 ctx
.xout
->mirrors
= 0;
6948 ofpbuf_use_stub(&ctx
.xout
->odp_actions
, ctx
.xout
->odp_actions_stub
,
6949 sizeof ctx
.xout
->odp_actions_stub
);
6950 ofpbuf_reserve(&ctx
.xout
->odp_actions
, NL_A_U32_SIZE
);
6953 ctx
.max_resubmit_trigger
= false;
6954 ctx
.orig_skb_priority
= ctx
.xin
->flow
.skb_priority
;
6959 ofpacts
= xin
->ofpacts
;
6960 ofpacts_len
= xin
->ofpacts_len
;
6961 } else if (xin
->rule
) {
6962 ofpacts
= xin
->rule
->up
.ofpacts
;
6963 ofpacts_len
= xin
->rule
->up
.ofpacts_len
;
6968 ofpbuf_use_stub(&ctx
.stack
, ctx
.init_stack
, sizeof ctx
.init_stack
);
6970 if (ctx
.ofproto
->has_mirrors
|| hit_resubmit_limit
) {
6971 /* Do this conditionally because the copy is expensive enough that it
6972 * shows up in profiles. */
6973 orig_flow
= ctx
.xin
->flow
;
6976 if (ctx
.xin
->flow
.nw_frag
& FLOW_NW_FRAG_ANY
) {
6977 switch (ctx
.ofproto
->up
.frag_handling
) {
6978 case OFPC_FRAG_NORMAL
:
6979 /* We must pretend that transport ports are unavailable. */
6980 ctx
.xin
->flow
.tp_src
= ctx
.base_flow
.tp_src
= htons(0);
6981 ctx
.xin
->flow
.tp_dst
= ctx
.base_flow
.tp_dst
= htons(0);
6984 case OFPC_FRAG_DROP
:
6987 case OFPC_FRAG_REASM
:
6990 case OFPC_FRAG_NX_MATCH
:
6991 /* Nothing to do. */
6994 case OFPC_INVALID_TTL_TO_CONTROLLER
:
6999 in_port
= get_ofp_port(ctx
.ofproto
, ctx
.xin
->flow
.in_port
);
7000 special
= process_special(ctx
.ofproto
, &ctx
.xin
->flow
, in_port
,
7003 ctx
.xout
->slow
= special
;
7005 static struct vlog_rate_limit trace_rl
= VLOG_RATE_LIMIT_INIT(1, 1);
7006 struct initial_vals initial_vals
;
7007 size_t sample_actions_len
;
7008 uint32_t local_odp_port
;
7010 initial_vals
.vlan_tci
= ctx
.base_flow
.vlan_tci
;
7012 add_sflow_action(&ctx
);
7013 add_ipfix_action(&ctx
);
7014 sample_actions_len
= ctx
.xout
->odp_actions
.size
;
7016 if (tunnel_ecn_ok(&ctx
) && (!in_port
|| may_receive(in_port
, &ctx
))) {
7017 do_xlate_actions(ofpacts
, ofpacts_len
, &ctx
);
7019 /* We've let OFPP_NORMAL and the learning action look at the
7020 * packet, so drop it now if forwarding is disabled. */
7021 if (in_port
&& !stp_forward_in_state(in_port
->stp_state
)) {
7022 ctx
.xout
->odp_actions
.size
= sample_actions_len
;
7026 if (ctx
.max_resubmit_trigger
&& !ctx
.xin
->resubmit_hook
) {
7027 if (!hit_resubmit_limit
) {
7028 /* We didn't record the original flow. Make sure we do from
7030 hit_resubmit_limit
= true;
7031 } else if (!VLOG_DROP_ERR(&trace_rl
)) {
7032 struct ds ds
= DS_EMPTY_INITIALIZER
;
7034 ofproto_trace(ctx
.ofproto
, &orig_flow
, ctx
.xin
->packet
,
7035 &initial_vals
, &ds
);
7036 VLOG_ERR("Trace triggered by excessive resubmit "
7037 "recursion:\n%s", ds_cstr(&ds
));
7042 local_odp_port
= ofp_port_to_odp_port(ctx
.ofproto
, OFPP_LOCAL
);
7043 if (!connmgr_must_output_local(ctx
.ofproto
->up
.connmgr
, &ctx
.xin
->flow
,
7045 ctx
.xout
->odp_actions
.data
,
7046 ctx
.xout
->odp_actions
.size
)) {
7047 compose_output_action(&ctx
, OFPP_LOCAL
);
7049 if (ctx
.ofproto
->has_mirrors
) {
7050 add_mirror_actions(&ctx
, &orig_flow
);
7052 fix_sflow_action(&ctx
);
7055 ofpbuf_uninit(&ctx
.stack
);
7058 /* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
7059 * into datapath actions, using 'ctx', and discards the datapath actions. */
7061 xlate_actions_for_side_effects(struct xlate_in
*xin
)
7063 struct xlate_out xout
;
7065 xlate_actions(xin
, &xout
);
7066 xlate_out_uninit(&xout
);
7070 xlate_report(struct xlate_ctx
*ctx
, const char *s
)
7072 if (ctx
->xin
->report_hook
) {
7073 ctx
->xin
->report_hook(ctx
, s
);
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static int
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
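/* Illustrative example (assumed numbers, not from the original text): on an
 * access port configured with vlan=10, any admitted packet maps to VLAN 10
 * regardless of 'vid'; on a native-VLAN port with vlan=10, an untagged packet
 * (vid=0) also maps to VLAN 10, while a packet tagged with VID 20 maps to
 * VLAN 20. */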
7107 /* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
7108 * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs
7111 * 'vid' should be the VID obtained from the 802.1Q header that was received as
7112 * part of a packet (specify 0 if there was no 802.1Q header), in the range
7115 input_vid_is_valid(uint16_t vid
, struct ofbundle
*in_bundle
, bool warn
)
7117 /* Allow any VID on the OFPP_NONE port. */
7118 if (in_bundle
== &ofpp_none_bundle
) {
7122 switch (in_bundle
->vlan_mode
) {
7123 case PORT_VLAN_ACCESS
:
7126 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7127 VLOG_WARN_RL(&rl
, "bridge %s: dropping VLAN %"PRIu16
" tagged "
7128 "packet received on port %s configured as VLAN "
7129 "%"PRIu16
" access port",
7130 in_bundle
->ofproto
->up
.name
, vid
,
7131 in_bundle
->name
, in_bundle
->vlan
);
7137 case PORT_VLAN_NATIVE_UNTAGGED
:
7138 case PORT_VLAN_NATIVE_TAGGED
:
7140 /* Port must always carry its native VLAN. */
7144 case PORT_VLAN_TRUNK
:
7145 if (!ofbundle_includes_vlan(in_bundle
, vid
)) {
7147 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7148 VLOG_WARN_RL(&rl
, "bridge %s: dropping VLAN %"PRIu16
" packet "
7149 "received on port %s not configured for trunking "
7151 in_bundle
->ofproto
->up
.name
, vid
,
7152 in_bundle
->name
, vid
);
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
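/* Illustrative example (assumed numbers, not from the original text): sending
 * VLAN 10 traffic out an access port yields VID 0 (no 802.1Q tag), out a trunk
 * or native-tagged port yields VID 10, and out a native-untagged port whose
 * native VLAN is 10 also yields VID 0, while any other VLAN keeps its VID. */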
static void
output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
                                        vid, &ctx->xout->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->xin->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->xin->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->xin->flow.vlan_tci = old_tci;
}
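/* Illustrative example (assumed numbers, not from the original text): for
 * VLAN 20 traffic leaving a trunk port, vid is 20, so the temporary vlan_tci
 * becomes VID 20 plus the packet's original PCP bits plus VLAN_CFI; the output
 * action is composed against that TCI and the flow's original vlan_tci is
 * restored immediately afterwards so later actions still see the pre-output
 * value. */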
static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}

static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
7259 add_mirror_actions(struct xlate_ctx
*ctx
, const struct flow
*orig_flow
)
7261 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
7262 mirror_mask_t mirrors
;
7263 struct ofbundle
*in_bundle
;
7266 const struct nlattr
*a
;
7269 in_bundle
= lookup_input_bundle(ctx
->ofproto
, orig_flow
->in_port
,
7270 ctx
->xin
->packet
!= NULL
, NULL
);
7274 mirrors
= in_bundle
->src_mirrors
;
7276 /* Drop frames on bundles reserved for mirroring. */
7277 if (in_bundle
->mirror_out
) {
7278 if (ctx
->xin
->packet
!= NULL
) {
7279 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7280 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet received on port "
7281 "%s, which is reserved exclusively for mirroring",
7282 ctx
->ofproto
->up
.name
, in_bundle
->name
);
7288 vid
= vlan_tci_to_vid(orig_flow
->vlan_tci
);
7289 if (!input_vid_is_valid(vid
, in_bundle
, ctx
->xin
->packet
!= NULL
)) {
7292 vlan
= input_vid_to_vlan(in_bundle
, vid
);
7294 /* Look at the output ports to check for destination selections. */
7296 NL_ATTR_FOR_EACH (a
, left
, ctx
->xout
->odp_actions
.data
,
7297 ctx
->xout
->odp_actions
.size
) {
7298 enum ovs_action_attr type
= nl_attr_type(a
);
7299 struct ofport_dpif
*ofport
;
7301 if (type
!= OVS_ACTION_ATTR_OUTPUT
) {
7305 ofport
= get_odp_port(ofproto
, nl_attr_get_u32(a
));
7306 if (ofport
&& ofport
->bundle
) {
7307 mirrors
|= ofport
->bundle
->dst_mirrors
;
7315 /* Restore the original packet before adding the mirror actions. */
7316 ctx
->xin
->flow
= *orig_flow
;
7321 m
= ofproto
->mirrors
[mirror_mask_ffs(mirrors
) - 1];
7323 if (!vlan_is_mirrored(m
, vlan
)) {
7324 mirrors
= zero_rightmost_1bit(mirrors
);
7328 mirrors
&= ~m
->dup_mirrors
;
7329 ctx
->xout
->mirrors
|= m
->dup_mirrors
;
7331 output_normal(ctx
, m
->out
, vlan
);
7332 } else if (vlan
!= m
->out_vlan
7333 && !eth_addr_is_reserved(orig_flow
->dl_dst
)) {
7334 struct ofbundle
*bundle
;
7336 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
7337 if (ofbundle_includes_vlan(bundle
, m
->out_vlan
)
7338 && !bundle
->mirror_out
) {
7339 output_normal(ctx
, bundle
, m
->out_vlan
);
7347 update_mirror_stats(struct ofproto_dpif
*ofproto
, mirror_mask_t mirrors
,
7348 uint64_t packets
, uint64_t bytes
)
7354 for (; mirrors
; mirrors
= zero_rightmost_1bit(mirrors
)) {
7357 m
= ofproto
->mirrors
[mirror_mask_ffs(mirrors
) - 1];
7360 /* In normal circumstances 'm' will not be NULL. However,
7361 * if mirrors are reconfigured, we can temporarily get out
7362 * of sync in facet_revalidate(). We could "correct" the
7363 * mirror list before reaching here, but doing that would
7364 * not properly account the traffic stats we've currently
7365 * accumulated for previous mirror configuration. */
7369 m
->packet_count
+= packets
;
7370 m
->byte_count
+= bytes
;
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
7388 update_learning_table(struct ofproto_dpif
*ofproto
,
7389 const struct flow
*flow
, int vlan
,
7390 struct ofbundle
*in_bundle
)
7392 struct mac_entry
*mac
;
7394 /* Don't learn the OFPP_NONE port. */
7395 if (in_bundle
== &ofpp_none_bundle
) {
7399 if (!mac_learning_may_learn(ofproto
->ml
, flow
->dl_src
, vlan
)) {
7403 mac
= mac_learning_insert(ofproto
->ml
, flow
->dl_src
, vlan
);
7404 if (is_gratuitous_arp(flow
)) {
7405 /* We don't want to learn from gratuitous ARP packets that are
7406 * reflected back over bond slaves so we lock the learning table. */
7407 if (!in_bundle
->bond
) {
7408 mac_entry_set_grat_arp_lock(mac
);
7409 } else if (mac_entry_is_grat_arp_locked(mac
)) {
7414 if (mac_entry_is_new(mac
) || mac
->port
.p
!= in_bundle
) {
7415 /* The log messages here could actually be useful in debugging,
7416 * so keep the rate limit relatively high. */
7417 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(30, 300);
7418 VLOG_DBG_RL(&rl
, "bridge %s: learned that "ETH_ADDR_FMT
" is "
7419 "on port %s in VLAN %d",
7420 ofproto
->up
.name
, ETH_ADDR_ARGS(flow
->dl_src
),
7421 in_bundle
->name
, vlan
);
7423 mac
->port
.p
= in_bundle
;
7424 tag_set_add(&ofproto
->backer
->revalidate_set
,
7425 mac_learning_changed(ofproto
->ml
, mac
));
7429 static struct ofbundle
*
7430 lookup_input_bundle(const struct ofproto_dpif
*ofproto
, uint16_t in_port
,
7431 bool warn
, struct ofport_dpif
**in_ofportp
)
7433 struct ofport_dpif
*ofport
;
7435 /* Find the port and bundle for the received packet. */
7436 ofport
= get_ofp_port(ofproto
, in_port
);
7438 *in_ofportp
= ofport
;
7440 if (ofport
&& ofport
->bundle
) {
7441 return ofport
->bundle
;
7444 /* Special-case OFPP_NONE, which a controller may use as the ingress
7445 * port for traffic that it is sourcing. */
7446 if (in_port
== OFPP_NONE
) {
7447 return &ofpp_none_bundle
;
7450 /* Odd. A few possible reasons here:
7452 * - We deleted a port but there are still a few packets queued up
7455 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
7456 * we don't know about.
7458 * - The ofproto client didn't configure the port as part of a bundle.
7459 * This is particularly likely to happen if a packet was received on the
7460 * port after it was created, but before the client had a chance to
7461 * configure its bundle.
7464 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7466 VLOG_WARN_RL(&rl
, "bridge %s: received packet on unknown "
7467 "port %"PRIu16
, ofproto
->up
.name
, in_port
);
7472 /* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
7473 * dropped. Returns true if they may be forwarded, false if they should be
7476 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
7477 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
7479 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
7480 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
7481 * checked by input_vid_is_valid().
7483 * May also add tags to '*tags', although the current implementation only does
7484 * so in one special case.
7487 is_admissible(struct xlate_ctx
*ctx
, struct ofport_dpif
*in_port
,
7490 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
7491 struct flow
*flow
= &ctx
->xin
->flow
;
7492 struct ofbundle
*in_bundle
= in_port
->bundle
;
7494 /* Drop frames for reserved multicast addresses
7495 * only if forward_bpdu option is absent. */
7496 if (!ofproto
->up
.forward_bpdu
&& eth_addr_is_reserved(flow
->dl_dst
)) {
7497 xlate_report(ctx
, "packet has reserved destination MAC, dropping");
7501 if (in_bundle
->bond
) {
7502 struct mac_entry
*mac
;
7504 switch (bond_check_admissibility(in_bundle
->bond
, in_port
,
7505 flow
->dl_dst
, &ctx
->xout
->tags
)) {
7510 xlate_report(ctx
, "bonding refused admissibility, dropping");
7513 case BV_DROP_IF_MOVED
:
7514 mac
= mac_learning_lookup(ofproto
->ml
, flow
->dl_src
, vlan
, NULL
);
7515 if (mac
&& mac
->port
.p
!= in_bundle
&&
7516 (!is_gratuitous_arp(flow
)
7517 || mac_entry_is_grat_arp_locked(mac
))) {
7518 xlate_report(ctx
, "SLB bond thinks this packet looped back, "
7530 xlate_normal(struct xlate_ctx
*ctx
)
7532 struct ofport_dpif
*in_port
;
7533 struct ofbundle
*in_bundle
;
7534 struct mac_entry
*mac
;
7538 ctx
->xout
->has_normal
= true;
7540 in_bundle
= lookup_input_bundle(ctx
->ofproto
, ctx
->xin
->flow
.in_port
,
7541 ctx
->xin
->packet
!= NULL
, &in_port
);
7543 xlate_report(ctx
, "no input bundle, dropping");
7547 /* Drop malformed frames. */
7548 if (ctx
->xin
->flow
.dl_type
== htons(ETH_TYPE_VLAN
) &&
7549 !(ctx
->xin
->flow
.vlan_tci
& htons(VLAN_CFI
))) {
7550 if (ctx
->xin
->packet
!= NULL
) {
7551 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7552 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet with partial "
7553 "VLAN tag received on port %s",
7554 ctx
->ofproto
->up
.name
, in_bundle
->name
);
7556 xlate_report(ctx
, "partial VLAN tag, dropping");
7560 /* Drop frames on bundles reserved for mirroring. */
7561 if (in_bundle
->mirror_out
) {
7562 if (ctx
->xin
->packet
!= NULL
) {
7563 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
7564 VLOG_WARN_RL(&rl
, "bridge %s: dropping packet received on port "
7565 "%s, which is reserved exclusively for mirroring",
7566 ctx
->ofproto
->up
.name
, in_bundle
->name
);
7568 xlate_report(ctx
, "input port is mirror output port, dropping");
7573 vid
= vlan_tci_to_vid(ctx
->xin
->flow
.vlan_tci
);
7574 if (!input_vid_is_valid(vid
, in_bundle
, ctx
->xin
->packet
!= NULL
)) {
7575 xlate_report(ctx
, "disallowed VLAN VID for this input port, dropping");
7578 vlan
= input_vid_to_vlan(in_bundle
, vid
);
7580 /* Check other admissibility requirements. */
7581 if (in_port
&& !is_admissible(ctx
, in_port
, vlan
)) {
7585 /* Learn source MAC. */
7586 if (ctx
->xin
->may_learn
) {
7587 update_learning_table(ctx
->ofproto
, &ctx
->xin
->flow
, vlan
, in_bundle
);
7590 /* Determine output bundle. */
7591 mac
= mac_learning_lookup(ctx
->ofproto
->ml
, ctx
->xin
->flow
.dl_dst
, vlan
,
7594 if (mac
->port
.p
!= in_bundle
) {
7595 xlate_report(ctx
, "forwarding to learned port");
7596 output_normal(ctx
, mac
->port
.p
, vlan
);
7598 xlate_report(ctx
, "learned port is input port, dropping");
7601 struct ofbundle
*bundle
;
7603 xlate_report(ctx
, "no learned MAC for destination, flooding");
7604 HMAP_FOR_EACH (bundle
, hmap_node
, &ctx
->ofproto
->bundles
) {
7605 if (bundle
!= in_bundle
7606 && ofbundle_includes_vlan(bundle
, vlan
)
7607 && bundle
->floodable
7608 && !bundle
->mirror_out
) {
7609 output_normal(ctx
, bundle
, vlan
);
7612 ctx
->xout
->nf_output_iface
= NF_OUT_FLOOD
;
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */
/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
                   uint32_t secret)
{
    if (minimask_is_catchall(mask)) {
        return 0;
    } else {
        uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
        return tag_create_deterministic(hash);
    }
}
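/* Illustrative note (an assumption added for clarity, not from the original
 * text): two flows that differ only in fields wildcarded by 'mask' hash to the
 * same value here and therefore share a tag, so invalidating that one tag
 * catches every facet whose lookup in the table could have changed, e.g. all
 * facets keyed on the same (vlan, dl_dst) pair in a MAC-learning table. */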
7651 /* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
7652 * taggability of that table.
7654 * This function must be called after *each* change to a flow table. If you
7655 * skip calling it on some changes then the pointer comparisons at the end can
7656 * be invalid if you get unlucky. For example, if a flow removal causes a
7657 * cls_table to be destroyed and then a flow insertion causes a cls_table with
7658 * different wildcards to be created with the same address, then this function
7659 * will incorrectly skip revalidation. */
7661 table_update_taggable(struct ofproto_dpif
*ofproto
, uint8_t table_id
)
7663 struct table_dpif
*table
= &ofproto
->tables
[table_id
];
7664 const struct oftable
*oftable
= &ofproto
->up
.tables
[table_id
];
7665 struct cls_table
*catchall
, *other
;
7666 struct cls_table
*t
;
7668 catchall
= other
= NULL
;
7670 switch (hmap_count(&oftable
->cls
.tables
)) {
7672 /* We could tag this OpenFlow table but it would make the logic a
7673 * little harder and it's a corner case that doesn't seem worth it
7679 HMAP_FOR_EACH (t
, hmap_node
, &oftable
->cls
.tables
) {
7680 if (cls_table_is_catchall(t
)) {
7682 } else if (!other
) {
7685 /* Indicate that we can't tag this by setting both tables to
7686 * NULL. (We know that 'catchall' is already NULL.) */
7693 /* Can't tag this table. */
7697 if (table
->catchall_table
!= catchall
|| table
->other_table
!= other
) {
7698 table
->catchall_table
= catchall
;
7699 table
->other_table
= other
;
7700 ofproto
->backer
->need_revalidate
= REV_FLOW_TABLE
;
7704 /* Given 'rule' that has changed in some way (either it is a rule being
7705 * inserted, a rule being deleted, or a rule whose actions are being
7706 * modified), marks facets for revalidation to ensure that packets will be
7707 * forwarded correctly according to the new state of the flow table.
7709 * This function must be called after *each* change to a flow table. See
7710 * the comment on table_update_taggable() for more information. */
7712 rule_invalidate(const struct rule_dpif
*rule
)
7714 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
7716 table_update_taggable(ofproto
, rule
->up
.table_id
);
7718 if (!ofproto
->backer
->need_revalidate
) {
7719 struct table_dpif
*table
= &ofproto
->tables
[rule
->up
.table_id
];
7721 if (table
->other_table
&& rule
->tag
) {
7722 tag_set_add(&ofproto
->backer
->revalidate_set
, rule
->tag
);
7724 ofproto
->backer
->need_revalidate
= REV_FLOW_TABLE
;
7730 set_frag_handling(struct ofproto
*ofproto_
,
7731 enum ofp_config_flags frag_handling
)
7733 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7734 if (frag_handling
!= OFPC_FRAG_REASM
) {
7735 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
7743 packet_out(struct ofproto
*ofproto_
, struct ofpbuf
*packet
,
7744 const struct flow
*flow
,
7745 const struct ofpact
*ofpacts
, size_t ofpacts_len
)
7747 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7748 struct initial_vals initial_vals
;
7749 struct odputil_keybuf keybuf
;
7750 struct dpif_flow_stats stats
;
7751 struct xlate_out xout
;
7752 struct xlate_in xin
;
7756 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
7757 odp_flow_key_from_flow(&key
, flow
,
7758 ofp_port_to_odp_port(ofproto
, flow
->in_port
));
7760 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
7762 initial_vals
.vlan_tci
= flow
->vlan_tci
;
7763 xlate_in_init(&xin
, ofproto
, flow
, &initial_vals
, NULL
, stats
.tcp_flags
,
7765 xin
.resubmit_stats
= &stats
;
7766 xin
.ofpacts_len
= ofpacts_len
;
7767 xin
.ofpacts
= ofpacts
;
7769 xlate_actions(&xin
, &xout
);
7770 dpif_execute(ofproto
->backer
->dpif
, key
.data
, key
.size
,
7771 xout
.odp_actions
.data
, xout
.odp_actions
.size
, packet
);
7772 xlate_out_uninit(&xout
);
7780 set_netflow(struct ofproto
*ofproto_
,
7781 const struct netflow_options
*netflow_options
)
7783 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7785 if (netflow_options
) {
7786 if (!ofproto
->netflow
) {
7787 ofproto
->netflow
= netflow_create();
7789 return netflow_set_options(ofproto
->netflow
, netflow_options
);
7791 netflow_destroy(ofproto
->netflow
);
7792 ofproto
->netflow
= NULL
;
7798 get_netflow_ids(const struct ofproto
*ofproto_
,
7799 uint8_t *engine_type
, uint8_t *engine_id
)
7801 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
7803 dpif_get_netflow_ids(ofproto
->backer
->dpif
, engine_type
, engine_id
);
7807 send_active_timeout(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
7809 if (!facet_is_controller_flow(facet
) &&
7810 netflow_active_timeout_expired(ofproto
->netflow
, &facet
->nf_flow
)) {
7811 struct subfacet
*subfacet
;
7812 struct ofexpired expired
;
7814 LIST_FOR_EACH (subfacet
, list_node
, &facet
->subfacets
) {
7815 if (subfacet
->path
== SF_FAST_PATH
) {
7816 struct dpif_flow_stats stats
;
7818 subfacet_install(subfacet
, &facet
->xout
.odp_actions
, &stats
);
7819 subfacet_update_stats(subfacet
, &stats
);
7823 expired
.flow
= facet
->flow
;
7824 expired
.packet_count
= facet
->packet_count
;
7825 expired
.byte_count
= facet
->byte_count
;
7826 expired
.used
= facet
->used
;
7827 netflow_expire(ofproto
->netflow
, &facet
->nf_flow
, &expired
);
7832 send_netflow_active_timeouts(struct ofproto_dpif
*ofproto
)
7834 struct facet
*facet
;
7836 HMAP_FOR_EACH (facet
, hmap_node
, &ofproto
->facets
) {
7837 send_active_timeout(ofproto
, facet
);
7841 static struct ofproto_dpif
*
7842 ofproto_dpif_lookup(const char *name
)
7844 struct ofproto_dpif
*ofproto
;
7846 HMAP_FOR_EACH_WITH_HASH (ofproto
, all_ofproto_dpifs_node
,
7847 hash_string(name
, 0), &all_ofproto_dpifs
) {
7848 if (!strcmp(ofproto
->up
.name
, name
)) {
7856 ofproto_unixctl_fdb_flush(struct unixctl_conn
*conn
, int argc
,
7857 const char *argv
[], void *aux OVS_UNUSED
)
7859 struct ofproto_dpif
*ofproto
;
7862 ofproto
= ofproto_dpif_lookup(argv
[1]);
7864 unixctl_command_reply_error(conn
, "no such bridge");
7867 mac_learning_flush(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
7869 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
7870 mac_learning_flush(ofproto
->ml
, &ofproto
->backer
->revalidate_set
);
7874 unixctl_command_reply(conn
, "table successfully flushed");
7878 ofproto_unixctl_fdb_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
7879 const char *argv
[], void *aux OVS_UNUSED
)
7881 struct ds ds
= DS_EMPTY_INITIALIZER
;
7882 const struct ofproto_dpif
*ofproto
;
7883 const struct mac_entry
*e
;
7885 ofproto
= ofproto_dpif_lookup(argv
[1]);
7887 unixctl_command_reply_error(conn
, "no such bridge");
7891 ds_put_cstr(&ds
, " port VLAN MAC Age\n");
7892 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
7893 struct ofbundle
*bundle
= e
->port
.p
;
7894 ds_put_format(&ds
, "%5d %4d "ETH_ADDR_FMT
" %3d\n",
7895 ofbundle_get_a_port(bundle
)->odp_port
,
7896 e
->vlan
, ETH_ADDR_ARGS(e
->mac
),
7897 mac_entry_age(ofproto
->ml
, e
));
7899 unixctl_command_reply(conn
, ds_cstr(&ds
));
7904 struct xlate_out xout
;
7905 struct xlate_in xin
;
7911 trace_format_rule(struct ds
*result
, uint8_t table_id
, int level
,
7912 const struct rule_dpif
*rule
)
7914 ds_put_char_multiple(result
, '\t', level
);
7916 ds_put_cstr(result
, "No match\n");
7920 ds_put_format(result
, "Rule: table=%"PRIu8
" cookie=%#"PRIx64
" ",
7921 table_id
, ntohll(rule
->up
.flow_cookie
));
7922 cls_rule_format(&rule
->up
.cr
, result
);
7923 ds_put_char(result
, '\n');
7925 ds_put_char_multiple(result
, '\t', level
);
7926 ds_put_cstr(result
, "OpenFlow ");
7927 ofpacts_format(rule
->up
.ofpacts
, rule
->up
.ofpacts_len
, result
);
7928 ds_put_char(result
, '\n');
7932 trace_format_flow(struct ds
*result
, int level
, const char *title
,
7933 struct trace_ctx
*trace
)
7935 ds_put_char_multiple(result
, '\t', level
);
7936 ds_put_format(result
, "%s: ", title
);
7937 if (flow_equal(&trace
->xin
.flow
, &trace
->flow
)) {
7938 ds_put_cstr(result
, "unchanged");
7940 flow_format(result
, &trace
->xin
.flow
);
7941 trace
->flow
= trace
->xin
.flow
;
7943 ds_put_char(result
, '\n');
7947 trace_format_regs(struct ds
*result
, int level
, const char *title
,
7948 struct trace_ctx
*trace
)
7952 ds_put_char_multiple(result
, '\t', level
);
7953 ds_put_format(result
, "%s:", title
);
7954 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
7955 ds_put_format(result
, " reg%zu=0x%"PRIx32
, i
, trace
->flow
.regs
[i
]);
7957 ds_put_char(result
, '\n');
7961 trace_format_odp(struct ds
*result
, int level
, const char *title
,
7962 struct trace_ctx
*trace
)
7964 struct ofpbuf
*odp_actions
= &trace
->xout
.odp_actions
;
7966 ds_put_char_multiple(result
, '\t', level
);
7967 ds_put_format(result
, "%s: ", title
);
7968 format_odp_actions(result
, odp_actions
->data
, odp_actions
->size
);
7969 ds_put_char(result
, '\n');
7973 trace_resubmit(struct xlate_ctx
*ctx
, struct rule_dpif
*rule
)
7975 struct trace_ctx
*trace
= CONTAINER_OF(ctx
->xin
, struct trace_ctx
, xin
);
7976 struct ds
*result
= trace
->result
;
7978 ds_put_char(result
, '\n');
7979 trace_format_flow(result
, ctx
->recurse
+ 1, "Resubmitted flow", trace
);
7980 trace_format_regs(result
, ctx
->recurse
+ 1, "Resubmitted regs", trace
);
7981 trace_format_odp(result
, ctx
->recurse
+ 1, "Resubmitted odp", trace
);
7982 trace_format_rule(result
, ctx
->table_id
, ctx
->recurse
+ 1, rule
);
7986 trace_report(struct xlate_ctx
*ctx
, const char *s
)
7988 struct trace_ctx
*trace
= CONTAINER_OF(ctx
->xin
, struct trace_ctx
, xin
);
7989 struct ds
*result
= trace
->result
;
7991 ds_put_char_multiple(result
, '\t', ctx
->recurse
);
7992 ds_put_cstr(result
, s
);
7993 ds_put_char(result
, '\n');
7997 ofproto_unixctl_trace(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
7998 void *aux OVS_UNUSED
)
8000 const struct dpif_backer
*backer
;
8001 struct ofproto_dpif
*ofproto
;
8002 struct ofpbuf odp_key
;
8003 struct ofpbuf
*packet
;
8004 struct initial_vals initial_vals
;
8012 ofpbuf_init(&odp_key
, 0);
8014 /* Handle "-generate" or a hex string as the last argument. */
8015 if (!strcmp(argv
[argc
- 1], "-generate")) {
8016 packet
= ofpbuf_new(0);
8019 const char *error
= eth_from_hex(argv
[argc
- 1], &packet
);
8022 } else if (argc
== 4) {
            /* The 3-argument form must end in "-generate" or a hex string. */
8024 unixctl_command_reply_error(conn
, error
);
    /* Parse the flow and determine whether a datapath or
     * bridge is specified. If function odp_flow_key_from_string()
     * returns 0, the flow is an odp_flow. If function
     * parse_ofp_exact_flow() returns 0, the flow is a br_flow. */
8033 if (!odp_flow_key_from_string(argv
[argc
- 1], NULL
, &odp_key
)) {
8034 /* If the odp_flow is the second argument,
8035 * the datapath name is the first argument. */
8037 const char *dp_type
;
8038 if (!strncmp(argv
[1], "ovs-", 4)) {
8039 dp_type
= argv
[1] + 4;
8043 backer
= shash_find_data(&all_dpif_backers
, dp_type
);
8045 unixctl_command_reply_error(conn
, "Cannot find datapath "
8050 /* No datapath name specified, so there should be only one
8052 struct shash_node
*node
;
8053 if (shash_count(&all_dpif_backers
) != 1) {
8054 unixctl_command_reply_error(conn
, "Must specify datapath "
8055 "name, there is more than one type of datapath");
8058 node
= shash_first(&all_dpif_backers
);
8059 backer
= node
->data
;
8062 /* Extract the ofproto_dpif object from the ofproto_receive()
8064 if (ofproto_receive(backer
, NULL
, odp_key
.data
,
8065 odp_key
.size
, &flow
, NULL
, &ofproto
, NULL
,
8067 unixctl_command_reply_error(conn
, "Invalid datapath flow");
8070 ds_put_format(&result
, "Bridge: %s\n", ofproto
->up
.name
);
8071 } else if (!parse_ofp_exact_flow(&flow
, argv
[argc
- 1])) {
8073 unixctl_command_reply_error(conn
, "Must specify bridge name");
8077 ofproto
= ofproto_dpif_lookup(argv
[1]);
8079 unixctl_command_reply_error(conn
, "Unknown bridge name");
8082 initial_vals
.vlan_tci
= flow
.vlan_tci
;
8084 unixctl_command_reply_error(conn
, "Bad flow syntax");
8088 /* Generate a packet, if requested. */
8090 if (!packet
->size
) {
8091 flow_compose(packet
, &flow
);
8093 ds_put_cstr(&result
, "Packet: ");
8094 s
= ofp_packet_to_string(packet
->data
, packet
->size
);
8095 ds_put_cstr(&result
, s
);
8098 /* Use the metadata from the flow and the packet argument
8099 * to reconstruct the flow. */
8100 flow_extract(packet
, flow
.skb_priority
, flow
.skb_mark
, NULL
,
8101 flow
.in_port
, &flow
);
8102 initial_vals
.vlan_tci
= flow
.vlan_tci
;
8106 ofproto_trace(ofproto
, &flow
, packet
, &initial_vals
, &result
);
8107 unixctl_command_reply(conn
, ds_cstr(&result
));
8110 ds_destroy(&result
);
8111 ofpbuf_delete(packet
);
8112 ofpbuf_uninit(&odp_key
);
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet,
              const struct initial_vals *initial_vals, struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow);

    trace_format_rule(ds, 0, 0, rule);
    if (rule == ofproto->miss_rule) {
        ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
    } else if (rule == ofproto->no_packet_in_rule) {
        ds_put_cstr(ds, "\nNo match, packets dropped because "
                    "OFPPC_NO_PACKET_IN is set on in_port.\n");
    } else if (rule == ofproto->drop_frags_rule) {
        ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
                    "and the fragment handling mode is \"drop\".\n");
    }

    if (rule) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;

        struct trace_ctx trace;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        xlate_in_init(&trace.xin, ofproto, flow, initial_vals, rule, tcp_flags,
                      packet);
        trace.xin.resubmit_hook = trace_resubmit;
        trace.xin.report_hook = trace_report;
        xlate_actions(&trace.xin, &trace.xout);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);
        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, trace.xout.odp_actions.data,
                           trace.xout.odp_actions.size);

        if (trace.xout.slow) {
            ds_put_cstr(ds, "\nThis flow is handled by the userspace "
                        "slow path because it:");
            switch (trace.xout.slow) {
            case SLOW_CFM:
                ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
                break;
            case SLOW_LACP:
                ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
                break;
            case SLOW_STP:
                ds_put_cstr(ds, "\n\t- Consists of STP packets.");
                break;
            case SLOW_BFD:
                ds_put_cstr(ds, "\n\t- Consists of BFD packets.");
                break;
            case SLOW_CONTROLLER:
                ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
                            "to the OpenFlow controller.");
                break;
            }
        }

        xlate_out_uninit(&trace.xout);
    }
}
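
/* Sketch of the resulting trace layout (not verbatim output): ofproto_trace()
 * emits a "Flow: " line, the rule chosen by rule_dpif_lookup(), a
 * "Final flow" line showing the flow after translation, a
 * "Datapath actions: " line, and, when trace.xout.slow is nonzero, one or
 * more reasons why the flow is handled in the userspace slow path. */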
static void
ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}
/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct facet *facet;
    int errors;

    errors = 0;
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }
    if (errors) {
        ofproto->backer->need_revalidate = REV_INCONSISTENCY;
    }

    if (errors) {
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}
static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}
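
/* Illustrative use (the command name matches the registration below; the
 * bridge name is hypothetical):
 *
 *     ovs-appctl ofproto/self-check br0    # check a single bridge
 *     ovs-appctl ofproto/self-check        # check every bridge
 *
 * Each line of the reply is produced by ofproto_dpif_self_check__() above. */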
/* Stores the current ofprotos in 'ofproto_shash'.  Returns a sorted list
 * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
 * to destroy 'ofproto_shash' and free the returned value. */
static const struct shash_node **
get_ofprotos(struct shash *ofproto_shash)
{
    const struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
        shash_add_nocopy(ofproto_shash, name, ofproto);
    }

    return shash_sort(ofproto_shash);
}
static void
ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct shash ofproto_shash;
    const struct shash_node **sorted_ofprotos;
    size_t i;

    shash_init(&ofproto_shash);
    sorted_ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        const struct shash_node *node = sorted_ofprotos[i];
        ds_put_format(&ds, "%s\n", node->name);
    }

    shash_destroy(&ofproto_shash);
    free(sorted_ofprotos);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
{
    const struct shash_node **ports;
    size_t i;
    struct avg_subfacet_rates lifetime;
    unsigned long long int minutes;
    const int min_ms = 60 * 1000; /* milliseconds in one minute. */

    minutes = (time_msec() - ofproto->created) / min_ms;

    if (minutes > 0) {
        lifetime.add_rate = (double)ofproto->total_subfacet_add_count
                            / minutes;
        lifetime.del_rate = (double)ofproto->total_subfacet_del_count
                            / minutes;
    } else {
        lifetime.add_rate = 0.0;
        lifetime.del_rate = 0.0;
    }

    ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
                  dpif_name(ofproto->backer->dpif));
    ds_put_format(ds,
                  "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
                  ofproto->n_hit, ofproto->n_missed);
    ds_put_format(ds, "\tflows: cur: %zu, avg: %5.3f, max: %d,"
                  " life span: %llu(ms)\n",
                  hmap_count(&ofproto->subfacets),
                  avg_subfacet_count(ofproto),
                  ofproto->max_n_subfacet,
                  avg_subfacet_life_span(ofproto));
    if (minutes >= 60) {
        show_dp_rates(ds, "\t\thourly avg:", &ofproto->hourly);
    }
    if (minutes >= 60 * 24) {
        show_dp_rates(ds, "\t\tdaily avg:", &ofproto->daily);
    }
    show_dp_rates(ds, "\t\toverall avg:", &lifetime);

    ports = shash_sort(&ofproto->up.port_by_name);
    for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
        const struct shash_node *node = ports[i];
        struct ofport *ofport = node->data;
        const char *name = netdev_get_name(ofport->netdev);
        const char *type = netdev_get_type(ofport->netdev);
        uint32_t odp_port;

        ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);

        odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
        if (odp_port != OVSP_NONE) {
            ds_put_format(ds, "%"PRIu32":", odp_port);
        } else {
            ds_put_cstr(ds, "none:");
        }

        if (strcmp(type, "system")) {
            struct netdev *netdev;
            int error;

            ds_put_format(ds, " (%s", type);

            error = netdev_open(name, type, &netdev);
            if (!error) {
                struct smap config;

                smap_init(&config);
                error = netdev_get_config(netdev, &config);
                if (!error) {
                    const struct smap_node **nodes;
                    size_t i;

                    nodes = smap_sort(&config);
                    for (i = 0; i < smap_count(&config); i++) {
                        const struct smap_node *node = nodes[i];
                        ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
                                      node->key, node->value);
                    }
                    free(nodes);
                }
                smap_destroy(&config);

                netdev_close(netdev);
            }
            ds_put_char(ds, ')');
        }
        ds_put_char(ds, '\n');
    }
    free(ports);
}
static void
ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;

    if (argc > 1) {
        int i;

        for (i = 1; i < argc; i++) {
            ofproto = ofproto_dpif_lookup(argv[i]);
            if (!ofproto) {
                ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
                              "for help)", argv[i]);
                unixctl_command_reply_error(conn, ds_cstr(&ds));
                ds_destroy(&ds);
                return;
            }
            show_dp_format(ofproto, &ds);
        }
    } else {
        struct shash ofproto_shash;
        const struct shash_node **sorted_ofprotos;
        size_t i;

        shash_init(&ofproto_shash);
        sorted_ofprotos = get_ofprotos(&ofproto_shash);
        for (i = 0; i < shash_count(&ofproto_shash); i++) {
            const struct shash_node *node = sorted_ofprotos[i];
            show_dp_format(node->data, &ds);
        }

        shash_destroy(&ofproto_shash);
        free(sorted_ofprotos);
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED, const char *argv[],
                                void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    struct subfacet *subfacet;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    update_stats(ofproto->backer);

    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        struct facet *facet = subfacet->facet;

        odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);

        ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
                      subfacet->dp_packet_count, subfacet->dp_byte_count);
        if (subfacet->used) {
            ds_put_format(&ds, "%.3fs",
                          (time_msec() - subfacet->used) / 1000.0);
        } else {
            ds_put_format(&ds, "never");
        }
        if (subfacet->facet->tcp_flags) {
            ds_put_cstr(&ds, ", flags:");
            packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
        }

        ds_put_cstr(&ds, ", actions:");
        if (facet->xout.slow) {
            uint64_t slow_path_stub[128 / 8];
            const struct nlattr *actions;
            size_t actions_len;

            compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
                              slow_path_stub, sizeof slow_path_stub,
                              &actions, &actions_len);
            format_odp_actions(&ds, actions, actions_len);
        } else {
            format_odp_actions(&ds, facet->xout.odp_actions.data,
                               facet->xout.odp_actions.size);
        }
        ds_put_char(&ds, '\n');
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
                               int argc OVS_UNUSED, const char *argv[],
                               void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    flush(&ofproto->up);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "[dp_name]|bridge odp_flow|br_flow [-generate|packet]",
        1, 3, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
                             ofproto_unixctl_dpif_show, NULL);
    unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_dump_flows, NULL);
    unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_del_flows, NULL);
}
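
/* The dpif/* commands registered above are also reached through ovs-appctl.
 * A hypothetical session (bridge names invented) might look like:
 *
 *     ovs-appctl dpif/dump-dps             # list datapaths as type@name
 *     ovs-appctl dpif/show br0             # per-bridge ports and statistics
 *     ovs-appctl dpif/dump-flows br0       # datapath flows for one bridge
 *     ovs-appctl dpif/del-flows br0        # flush them
 */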
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
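
/* Implementation sketch (restating what the functions below do): each
 * splintered port is recorded in a "struct vlan_splinter" indexed two ways,
 * in ofproto->vlandev_map by the VLAN device's OpenFlow port number and in
 * ofproto->realdev_vid_map by hash_realdev_vid(realdev_ofp_port, vid), so
 * that vsp_vlandev_to_realdev() and vsp_realdev_to_vlandev() can translate
 * in either direction. */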
static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}
static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}
/* Returns the OFP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
 * 'struct ofport_dpif'.  For example, given 'realdev_ofp_port' of eth0 and
 * 'vlan_tci' 9, it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
 * function just returns its 'realdev_ofp_port' argument. */
static uint16_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint16_t realdev_ofp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return vsp->vlandev_ofp_port;
            }
        }
    }
    return realdev_ofp_port;
}
static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}
/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}
/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}
static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
static uint32_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : OVSP_NONE;
}
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
{
    struct ofport_dpif *port;

    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
                             hash_int(odp_port, 0),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }

    return NULL;
}
static uint16_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
static unsigned long long int
avg_subfacet_life_span(const struct ofproto_dpif *ofproto)
{
    unsigned long long int dc;
    unsigned long long int avg;

    dc = ofproto->total_subfacet_del_count + ofproto->subfacet_del_count;
    avg = dc ? ofproto->total_subfacet_life_span / dc : 0;

    return avg;
}
static double
avg_subfacet_count(const struct ofproto_dpif *ofproto)
{
    double avg_c = 0.0;

    if (ofproto->n_update_stats) {
        avg_c = (double)ofproto->total_subfacet_count
                / ofproto->n_update_stats;
    }

    return avg_c;
}
static void
show_dp_rates(struct ds *ds, const char *heading,
              const struct avg_subfacet_rates *rates)
{
    ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
                  heading, rates->add_rate, rates->del_rate);
}
static void
update_max_subfacet_count(struct ofproto_dpif *ofproto)
{
    ofproto->max_n_subfacet = MAX(ofproto->max_n_subfacet,
                                  hmap_count(&ofproto->subfacets));
}
/* Compute exponentially weighted moving average, adding 'new' as the newest,
 * most heavily weighted element.  'base' designates the rate of decay: after
 * 'base' further updates, 'new''s weight in the EWMA decays to about 1/e. */
static void
exp_mavg(double *avg, int base, double new)
{
    *avg = (*avg * (base - 1) + new) / base;
}
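
/* Worked example (numbers invented): with base == 60, a running average of
 * 10.0 and a new sample of 70.0, exp_mavg() computes
 * (10.0 * 59 + 70.0) / 60 == 660.0 / 60 == 11.0, so a single sample moves the
 * average only 1/60th of the way toward the new value. */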
static void
update_moving_averages(struct ofproto_dpif *ofproto)
{
    const int min_ms = 60 * 1000; /* milliseconds in one minute. */

    /* Update hourly averages on the minute boundaries. */
    if (time_msec() - ofproto->last_minute >= min_ms) {
        exp_mavg(&ofproto->hourly.add_rate, 60, ofproto->subfacet_add_count);
        exp_mavg(&ofproto->hourly.del_rate, 60, ofproto->subfacet_del_count);

        /* Update daily averages on the hour boundaries. */
        if ((ofproto->last_minute - ofproto->created) / min_ms % 60 == 59) {
            exp_mavg(&ofproto->daily.add_rate, 24, ofproto->hourly.add_rate);
            exp_mavg(&ofproto->daily.del_rate, 24, ofproto->hourly.del_rate);
        }

        ofproto->total_subfacet_add_count += ofproto->subfacet_add_count;
        ofproto->total_subfacet_del_count += ofproto->subfacet_del_count;
        ofproto->subfacet_add_count = 0;
        ofproto->subfacet_del_count = 0;
        ofproto->last_minute += min_ms;
    }
}
const struct ofproto_class ofproto_dpif_class = {
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_modify_actions,
    get_stp_port_status,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,